Skip to content

Commit

Permalink
spiders: add elsevier spider
Browse files Browse the repository at this point in the history
  • Loading branch information
MJedr committed Sep 18, 2020
1 parent 1d6c9c7 commit ac5262b
Show file tree
Hide file tree
Showing 17 changed files with 3,209 additions and 12 deletions.
1 change: 1 addition & 0 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ env:
- PYTHON=py2 SUITE=functional_desy
- PYTHON=py2 SUITE=functional_cds
- PYTHON=py2 SUITE=functional_pos
- PYTHON=py2 SUITE=functional_elsevier
- PYTHON=py3 SUITE=unit

matrix:
Expand Down
10 changes: 9 additions & 1 deletion docker-compose.test.py2.yml
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,13 @@ services:
http-server.local:
condition: service_healthy

functional_elsevier:
<<: *service_base
command: py.test -vv tests/functional/elsevier
depends_on:
- scrapyd
- localstack

unit:
<<: *service_base
command: bash -c "py.test tests/unit -vv && make -C docs clean && make -C docs html && python setup.py sdist && ls dist/*"
Expand Down Expand Up @@ -196,12 +203,13 @@ services:
localstack:
image: localstack/localstack:latest
ports:
- '4572:4572'
- '4566:4566'
environment:
- SERVICES=s3
- DEBUG=1
- DATA_DIR=/home/localstack/data
- HOSTNAME_EXTERNAL=localstack
- HOSTNAME=localstack

networks:
ftp:
Expand Down
27 changes: 17 additions & 10 deletions hepcrawl/parsers/elsevier.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ def parse(self):
self.builder.add_doi(**doi)
for keyword in self.keywords:
self.builder.add_keyword(keyword)
self.builder.add_imprint_date(self.publication_date.dumps())
self.builder.add_imprint_date(self.publication_date.dumps() if self.publication_date else None)
for reference in self.references:
self.builder.add_reference(reference)

Expand Down Expand Up @@ -235,6 +235,7 @@ def dois(self):

@property
def document_type(self):
doctype = None
if self.root.xpath("./*[self::article or self::simple-article or self::book-review]"):
doctype = 'article'
elif self.root.xpath("./*[self::book or self::simple-book]"):
Expand Down Expand Up @@ -341,11 +342,17 @@ def page_end(self):

@property
def publication_date(self):
    """Parse the cover display date into a ``PartialDate``.

    Returns:
        PartialDate or None: the parsed publication date, or ``None`` when
        the metadata carries no ``coverDisplayDate`` element.
    """
    publication_date = None
    publication_date_string = self.root.xpath(
        './RDF/Description/coverDisplayDate/text()'
    ).extract_first()
    if publication_date_string:
        try:
            publication_date = PartialDate.parse(publication_date_string)
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        except Exception:
            # In case the date contains a month range, eg. "July-September
            # 2020", drop everything up to the last month and retry.
            # BUGFIX: the original class "[A-aZ-z]" also matched the
            # punctuation [\]^_` ; "[A-Za-z]" matches letters only.
            publication_date = re.sub(
                r"[A-Za-z]*-(?=[A-Za-z])", "", publication_date_string
            )
            publication_date = PartialDate.parse(publication_date)
    return publication_date

@property
Expand Down Expand Up @@ -383,9 +390,9 @@ def subtitle(self):
def title(self):
    """Return the document title stripped of leading/trailing newlines.

    Falls back to ``None`` when the title element is absent or empty.
    """
    raw_title = self.root.xpath(
        './*/head/title//text()'
    ).extract_first()
    if not raw_title:
        return None
    return raw_title.strip('\n')

@property
def year(self):
Expand Down Expand Up @@ -484,9 +491,9 @@ def get_reference_authors(ref_node):
authors = ref_node.xpath("./contribution/authors/author")
authors_names = []
for author in authors:
given_names = author.xpath("./given-name/text()").extract_first()
last_names = author.xpath("./surname/text()").extract_first()
authors_names.append(" ".join([given_names, last_names]))
given_names = author.xpath("./given-name/text()").extract_first(default="")
last_names = author.xpath("./surname/text()").extract_first(default="")
authors_names.append(" ".join([given_names, last_names]).strip())
return authors_names

@staticmethod
Expand All @@ -502,9 +509,9 @@ def get_reference_editors(ref_node):
editors = ref_node.xpath(".//editors/authors/author")
editors_names = []
for editor in editors:
given_names = editor.xpath("./given-name/text()").extract_first()
last_names = editor.xpath("./surname/text()").extract_first()
editors_names.append(" ".join([given_names, last_names]))
given_names = editor.xpath("./given-name/text()").extract_first(default="")
last_names = editor.xpath("./surname/text()").extract_first(default="")
editors_names.append(" ".join([given_names, last_names]).strip())
return editors_names

@staticmethod
Expand Down
190 changes: 190 additions & 0 deletions hepcrawl/spiders/elsevier.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
import glob
import os
import shutil
import tempfile
import xml.etree.ElementTree as et
import zipfile

import boto3
import requests

from ..parsers import ElsevierParser
from ..utils import ParsedItem, strict_kwargs


class ElsevierSpider:
    """Harvest Elsevier CONSYN article packages into S3 and parse them.

    Workflow:
      1. Fetch the CONSYN Atom batch feed to discover new zip packages.
      2. Upload packages not yet present in the packages bucket.
      3. Extract each package's XML files into the files bucket, keyed by
         the article DOI.
      4. Parse the stored XML files into HEP records.
    """

    def __init__(
        self,
        # NOTE(review): parameter name is misspelled ("acces_key_id") but is
        # kept as-is for backward compatibility with existing callers.
        acces_key_id,
        secret_access_key,
        packages_bucket_name,
        files_bucket_name,
        elsevier_consyn_key,
        s3_host="https://s3.cern.ch",
    ):
        self.access_key_id = acces_key_id
        self.secret_access_key = secret_access_key
        self.packages_bucket_name = packages_bucket_name
        self.files_bucket_name = files_bucket_name
        self.elsevier_consyn_key = elsevier_consyn_key
        self.new_packages = set()
        self.new_xml_files = set()
        self.s3_host = s3_host

        # BUGFIX: the original tested ``if not (a, b, c, d):`` which is
        # always False (a non-empty tuple is truthy), so missing
        # credentials were never detected.  ``all`` checks each value.
        if not all(
            (
                self.access_key_id,
                self.secret_access_key,
                self.packages_bucket_name,
                self.files_bucket_name,
            )
        ):
            raise Exception("Missing parameters necessary to establish s3 connection")
        self.s3_connection = self.create_s3_connection()
        self.s3_packages_bucket_conn = self.s3_bucket_connection(
            self.packages_bucket_name
        )
        self.s3_files_bucket_conn = self.s3_bucket_connection(
            self.files_bucket_name
        )

    def create_s3_connection(self):
        """Create a boto3 S3 resource pointed at ``self.s3_host``."""
        session = boto3.Session(
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
        )
        return session.resource("s3", endpoint_url=self.s3_host)

    def s3_bucket_connection(self, bucket_name):
        """Return a boto3 ``Bucket`` object for ``bucket_name``."""
        return self.s3_connection.Bucket(bucket_name)

    def _get_keys_names_from_bucket(self):
        """Return the set of object keys in the packages bucket."""
        return {key.key for key in self.s3_packages_bucket_conn.objects.all()}

    def _download_elsevier_metadata(self):
        """Fetch the CONSYN Atom batch feed for our subscription key."""
        elsevier_batch_download_url = (
            "https://consyn.elsevier.com/batch/atom?key=" + self.elsevier_consyn_key
        )
        return requests.get(elsevier_batch_download_url)

    def _get_package_urls_from_elsevier(self):
        """
        Extracts names and urls of the zip packages from elsevier batch feed
        Returns:
            dict(name: url): dict of zip packages names and urls
        """
        packages_metadata = self._download_elsevier_metadata()
        feed_root = et.fromstring(packages_metadata.text)
        urls_for_packages = {}
        # ``Element.getchildren()`` was deprecated and removed in Python
        # 3.9; iterating the element directly is the supported equivalent.
        for entry in feed_root:
            if "entry" in entry.tag:
                file_data = list(entry)
                # assumes feed entries are [title, link, ...] in that order
                # -- TODO confirm against a live CONSYN feed
                link = file_data[1].attrib["href"]
                urls_for_packages[file_data[0].text] = link
        return urls_for_packages

    def _get_all_new_packages(self):
        """
        Checks which packages from elsevier batch feed are not in the s3 bucket yet
        Returns:
            dict(name: url): dict of zip packages names and urls
        """
        urls_for_packages = self._get_package_urls_from_elsevier()
        bucket_data = self._get_keys_names_from_bucket()
        packages_not_in_bucket = {
            name: urls_for_packages[name]
            for name in urls_for_packages.keys() - bucket_data
        }
        self.new_packages = set(packages_not_in_bucket.keys())
        return packages_not_in_bucket

    def populate_s3_bucket_with_elsevier_packages(self):
        """
        Uploads to s3 bucket new zip folders containing xml-s for elsevier articles
        """
        for name, url in self._get_all_new_packages().items():
            if name.lower().endswith("zip"):
                request = requests.get(url, stream=True)
                # Stream the raw response straight into S3 to avoid
                # buffering the whole package in memory.
                self.s3_packages_bucket_conn.upload_fileobj(request.raw, name)

    @staticmethod
    def _get_doi_for_xml_file(xml_file):
        """Extract the article DOI from an Elsevier XML string."""
        parser = ElsevierParser(xml_file)
        return parser.get_identifier()

    @staticmethod
    def _package_basename(package):
        """Return ``package`` without a trailing ``.zip``/``.ZIP`` extension.

        BUGFIX: the original used ``package.lstrip(".ZIP")``, which strips
        any of the characters ``.``, ``Z``, ``I``, ``P`` from the *left*
        end of the string rather than removing the ``.ZIP`` suffix.
        """
        if package.lower().endswith(".zip"):
            return package[:-4]
        return package

    def extract_zip_packages_to_s3(self):
        """
        Extracts the files from zip folders downloaded from elsevier and
        uploads them with a correct name (article doi) to the correct s3
        bucket, recording the uploaded keys in ``self.new_xml_files``.
        """
        for package in self.new_packages:
            tempdir = tempfile.mkdtemp()
            # ``finally`` guarantees the temp dir is removed even when a
            # download, unzip or upload step raises.
            try:
                local_zip = os.path.join(
                    tempdir, "{0}.zip".format(self._package_basename(package))
                )
                self.s3_packages_bucket_conn.download_file(package, local_zip)
                with zipfile.ZipFile(local_zip, "r") as zip_package:
                    zip_package.extractall(tempdir)
                for xml_path in glob.iglob(
                    "{tempdir}/**/*.xml".format(tempdir=tempdir), recursive=True
                ):
                    with open(xml_path) as f:
                        elsevier_xml = f.read()
                    file_doi = self._get_doi_for_xml_file(elsevier_xml)
                    target_key = "{file_doi}.xml".format(file_doi=file_doi)
                    self.s3_files_bucket_conn.upload_file(xml_path, target_key)
                    self.new_xml_files.add(target_key)
            finally:
                shutil.rmtree(tempdir)

    def parse_items_from_s3(self, new=True):
        """
        Parse xml files in the s3 bucket
        Yields:
            HEP records
        """
        tempdir = tempfile.mkdtemp()
        try:
            if new:
                files_to_parse = self.new_xml_files
            else:
                files_to_parse = [
                    key.key for key in self.s3_files_bucket_conn.objects.all()
                ]
            for filename in files_to_parse:
                # Flatten any slashes in the key so it is a valid file name.
                file_path = os.path.join(tempdir, filename.replace("/", "_"))
                self.s3_files_bucket_conn.download_file(filename, file_path)
                with open(file_path) as f:
                    elsevier_xml = f.read()
                yield self.parse_record(elsevier_xml)
        finally:
            shutil.rmtree(tempdir)

    @staticmethod
    def parse_record(selector):
        """Parse an elsevier XML exported file into a HEP record."""
        parser = ElsevierParser(selector)
        return ParsedItem(record=parser.parse(), record_format="hep")
2 changes: 1 addition & 1 deletion tests/functional/desy/test_desy.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ def setup_s3_files(s3_key, s3_secret, s3_server, s3_input_bucket, s3_output_buck
def get_s3_settings():
key = 'key'
secret = 'secret'
s3_host = 'http://localstack:4572'
s3_host = 'http://localstack:4566'
input_bucket = 'incoming'
output_bucket = 'processed'

Expand Down
Binary file not shown.

Large diffs are not rendered by default.

Loading

0 comments on commit ac5262b

Please sign in to comment.