From 35f992030fe91ed200b886fbe7c99a5a56db5833 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?P=C3=A1ll=20Hilmarsson?=
Date: Wed, 21 Sep 2016 08:58:05 +0000
Subject: [PATCH] initial commit

---
 requirements.txt |  2 ++
 scraper.py       | 47 +++++++++++++++++++++++++----------------------
 2 files changed, 27 insertions(+), 22 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index fce25cc..67224b7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,5 @@
 
 lxml==3.4.4
 cssselect==0.9.1
+requests
+python-dateutil
diff --git a/scraper.py b/scraper.py
index 69bea68..4d9bdad 100644
--- a/scraper.py
+++ b/scraper.py
@@ -1,24 +1,27 @@
-# This is a template for a Python scraper on morph.io (https://morph.io)
-# including some code snippets below that you should find helpful
+# -*- coding: utf-8 -*-
 
-# import scraperwiki
-# import lxml.html
-#
-# # Read in a page
-# html = scraperwiki.scrape("http://foo.com")
-#
-# # Find something on the page using css selectors
-# root = lxml.html.fromstring(html)
-# root.cssselect("div[align='left']")
-#
-# # Write out to the sqlite database using scraperwiki library
-# scraperwiki.sqlite.save(unique_keys=['name'], data={"name": "susan", "occupation": "software developer"})
-#
-# # An arbitrary query against the database
-# scraperwiki.sql.select("* from data where 'name'='peter'")
+import scraperwiki
+import lxml.html
+import requests
+import urlparse
+from dateutil import parser
 
-# You don't have to do things with the ScraperWiki and lxml libraries.
-# You can use whatever libraries you want: https://morph.io/documentation/python
-# All that matters is that your final data is written to an SQLite database
-# called "data.sqlite" in the current working directory which has at least a table
-# called "data".
+BASE_URL = "http://www.kopavogur.is/stjornsyslan/fundargerdir/"
+DATA_URL = "http://www.kopavogur.is/stjornsyslan/fundargerdir/searchmeetings.aspx"
+
+r = requests.get(DATA_URL)
+root = lxml.html.fromstring(r.text)
+items = root.xpath("//span[@id='l_Content']/table/tr")
+
+data = []
+
+for item in items[1:]:
+    meeting = {}
+    meeting["titill"] = item[1].text
+    meeting["url"] = urlparse.urljoin(BASE_URL, item[0][0].attrib["href"])
+    meeting["dagsetning"] = item[2].text
+    meeting["date"] = parser.parse(item[2].text)
+    meeting["nefnd"] = item[0][0].text
+    data.append(meeting)
+scraperwiki.sqlite.save(unique_keys=['url'],
+                        data=data)
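
Note on the committed scraper (not part of the patch itself): it targets Python 2, since it imports the urlparse module, and it relies on scraperwiki.sqlite.save with unique_keys=['url'] to upsert one row per meeting URL into the default "data" table of data.sqlite. A minimal Python 3 sketch of the same URL-joining and date-parsing steps is shown below; the dayfirst flag and the sample href/date strings are assumptions about the Kopavogur site's markup, not taken from the commit.

# Python 3 sketch of the helpers used in scraper.py above (assumed equivalents).
from urllib.parse import urljoin   # Python 3 location of urlparse.urljoin
from dateutil import parser

BASE_URL = "http://www.kopavogur.is/stjornsyslan/fundargerdir/"

# Relative meeting links are resolved against the section base URL.
url = urljoin(BASE_URL, "fundargerd.aspx?id=123")  # hypothetical href value

# Icelandic dates are written day-first, so dayfirst=True is assumed here;
# the committed code calls parser.parse() with its defaults.
date = parser.parse("21.9.2016", dayfirst=True)

print(url)               # .../stjornsyslan/fundargerdir/fundargerd.aspx?id=123
print(date.isoformat())  # 2016-09-21T00:00:00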