Commit
Showing 1 changed file with 25 additions and 21 deletions.
The diff replaces the stock Morph.io Python scraper template with a scraper that downloads the posta.sk postcode archive and stores one row per municipality:

@@ -1,23 +1,27 @@
-# This is a template for a Python scraper on Morph (https://morph.io)
-# including some code snippets below that you should find helpful
+#!/usr/bin/python2.7

-# import scraperwiki
-# import lxml.html
-#
-# # Read in a page
-# html = scraperwiki.scrape("http://foo.com")
-#
-# # Find something on the page using css selectors
-# root = lxml.html.fromstring(html)
-# root.cssselect("div[align='left']")
-#
-# # Write out to the sqlite database using scraperwiki library
-# scraperwiki.sqlite.save(unique_keys=['name'], data={"name": "susan", "occupation": "software developer"})
-#
-# # An arbitrary query against the database
-# scraperwiki.sql.select("* from data where 'name'='peter'")
+import scraperwiki
+#import csv
+import urllib2
+import xlrd
+import zipfile
+from cStringIO import StringIO

-# You don't have to do things with the ScraperWiki and lxml libraries. You can use whatever libraries are installed
-# on Morph for Python (https://github.com/openaustralia/morph-docker-python/blob/master/pip_requirements.txt) and all that matters
-# is that your final data is written to an Sqlite database called data.sqlite in the current working directory which
-# has at least a table called data.
+url = "http://www.posta.sk/subory/322/psc-obci-a-ulic.zip"
+
+archive_file = StringIO(urllib2.urlopen(url).read())
+archive = zipfile.ZipFile(archive_file)
+
+wb = xlrd.open_workbook(file_contents=archive.read('OBCE.XLS'))
+sheet = wb.sheets()[0]
+with open('psc.csv', 'wb') as f:
+    #writer = csv.writer(f)
+    for row in range(1, sheet.nrows):
+        #writer.writerow([sheet.cell(row, i).value.encode('utf8') for i in cell_idxs])
+        data = {
+            'obec': sheet.cell(row, 1),
+            'okres': sheet.cell(row, 2),
+            'psc': sheet.cell(row, 3),
+            'kraj': sheet.cell(row, 7),
+        }
+        scraperwiki.sqlite.save(unique_keys=['obec'], data=data)
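Two details of the committed code are worth flagging: `sheet.cell(row, n)` returns an xlrd Cell object rather than its value, so scraperwiki will most likely store the Cell's string representation instead of the plain text, and `psc.csv` is opened but never written because the csv writer lines are commented out. Below is a minimal Python 3 sketch of the same flow (download the ZIP, read OBCE.XLS with xlrd, save one row per municipality), offered as an illustration rather than the committed behaviour: using the standard-library sqlite3 module, writing to data.sqlite with a table called data as the removed Morph template asks, is my substitution for scraperwiki.sqlite.save; the URL, the OBCE.XLS file name, and the column indexes are taken from the diff above.

```python
# Sketch only: assumes the OBCE.XLS layout implied by the diff
# (column 1 = obec, 2 = okres, 3 = psc, 7 = kraj) and substitutes
# sqlite3 for scraperwiki.sqlite.save.
import io
import sqlite3
import urllib.request
import zipfile

import xlrd  # xlrd 2.x still reads legacy .xls files

URL = "http://www.posta.sk/subory/322/psc-obci-a-ulic.zip"

# Download the ZIP into memory and open the spreadsheet inside it.
with urllib.request.urlopen(URL) as resp:
    archive = zipfile.ZipFile(io.BytesIO(resp.read()))
book = xlrd.open_workbook(file_contents=archive.read("OBCE.XLS"))
sheet = book.sheet_by_index(0)

# data.sqlite / table "data" matches the Morph.io convention from the template.
conn = sqlite3.connect("data.sqlite")
conn.execute(
    "CREATE TABLE IF NOT EXISTS data "
    "(obec TEXT PRIMARY KEY, okres TEXT, psc TEXT, kraj TEXT)"
)

for row in range(1, sheet.nrows):  # row 0 is the header row
    # .value unwraps the xlrd Cell into its plain string/number.
    conn.execute(
        "INSERT OR REPLACE INTO data (obec, okres, psc, kraj) VALUES (?, ?, ?, ?)",
        (
            sheet.cell(row, 1).value,
            sheet.cell(row, 2).value,
            sheet.cell(row, 3).value,
            sheet.cell(row, 7).value,
        ),
    )

conn.commit()
conn.close()
```

Keying on obec mirrors the original `unique_keys=['obec']`, so re-running the scraper updates rows in place rather than duplicating them; if municipality names turn out not to be unique across districts, a composite key of obec plus okres may be the safer choice.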