# test_feed.py — 94 lines (78 loc) · 4.23 KB
import time
from doajtest.helpers import DoajTestCase
from portality import models
from portality.view import atom
from lxml import etree
class TestFeed(DoajTestCase):
    """Tests for the Atom feed view (portality.view.atom).

    NOTE(review): the indentation of this file was lost during extraction;
    the nesting below is reconstructed from the apparent logic (loop-body
    extent, placement of the sleeps, extent of the ``with`` blocks) —
    confirm against the original repository before relying on it.
    """

    def test_01_object(self):
        """Populate the index with journals and check the feed object's entries."""
        # first try requesting a feed over the empty test index
        f = atom.get_feed("http://my.test.com")
        assert len(f.entries.keys()) == 0
        assert f.url == "http://my.test.com"

        # now populate the index and then re-get the feed
        ids = []
        for i in range(5):
            j = models.Journal()
            j.set_in_doaj(True)
            bj = j.bibjson()
            bj.title = "Test Journal {x}".format(x=i)
            bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
            bj.publisher = "Test Publisher {x}".format(x=i)
            bj.add_subject("LCC", "Agriculture")
            bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
            j.save()
            ids.append(j.id)
            # make sure the last updated dates are suitably different
            time.sleep(1)

        # presumably gives the index time to catch up with the writes
        # before querying — TODO confirm
        time.sleep(1)

        # the feed view builds entry links from the active request, so a
        # request context is needed here — NOTE(review): confirm whether the
        # assertions below were inside or after the ``with`` in the original
        with self.app_test.test_request_context('/feed'):
            f = atom.get_feed("http://my.test.com")
            assert len(f.entries.keys()) == 5

            # now go through the entries in order, and check they are as expected
            entry_dates = f.entries.keys()
            for i in range(5):
                # entries are keyed by date; sorting ascending should match
                # creation order because of the per-journal sleep above
                e = f.entries.get(sorted(entry_dates)[i])[0]
                assert e["author"] == "Test Publisher {x}".format(x=i)
                assert len(e["categories"]) == 1
                assert e["categories"][0] == "LCC:Agriculture"
                assert e["content_src"].endswith("{x}000-0000?rss".format(x=i))
                assert e["alternate"].endswith("{x}000-0000?rss".format(x=i))
                assert e["id"] == "urn:uuid:" + ids[i]
                assert e["related"] == "http://homepage.com/{x}".format(x=i)
                assert "rights" in e
                assert e["summary"].startswith("Published by Test Publisher {x}".format(x=i))
                assert e["title"] == "Test Journal {x} ({x}000-0000)".format(x=i)
                assert "updated" in e

    def test_02_xml(self):
        """Populate the index, serialise the feed, and check the Atom XML."""
        # now populate the index and then re-get the feed
        ids = []
        for i in range(5):
            j = models.Journal()
            j.set_in_doaj(True)
            bj = j.bibjson()
            bj.title = "Test Journal {x}".format(x=i)
            bj.add_identifier(bj.P_ISSN, "{x}000-0000".format(x=i))
            bj.publisher = "Test Publisher {x}".format(x=i)
            bj.add_subject("LCC", "Agriculture")
            bj.add_url("http://homepage.com/{x}".format(x=i), "homepage")
            j.save()
            ids.append(j.id)
            # make sure the last updated dates are suitably different
            time.sleep(1)

        # presumably gives the index time to catch up with the writes
        # before querying — TODO confirm
        time.sleep(1)

        with self.app_test.test_request_context('/feed'):
            f = atom.get_feed("http://my.test.com")
            s = f.serialise()
            xml = etree.fromstring(s)
            entries = xml.findall("{http://www.w3.org/2005/Atom}entry")
            for i in range(5):
                # serialised entries come out newest-first, so entry i
                # corresponds to journal index 4 - i
                inv = 4 - i
                e = entries[i]
                assert e.xpath("atom:author/atom:name", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].text == "Test Publisher {x}".format(x=inv)
                assert e.xpath("atom:content", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].get("src").endswith("{x}000-0000?rss".format(x=inv))
                assert e.xpath("atom:id", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].text == "urn:uuid:" + ids[inv]
                assert e.xpath("atom:link[@rel='alternate']", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].get("href").endswith("{x}000-0000?rss".format(x=inv))
                assert e.xpath("atom:link[@rel='related']", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].get("href") == "http://homepage.com/{x}".format(x=inv)
                assert e.xpath("atom:category", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].get("term") == "LCC:Agriculture"
                assert e.xpath("atom:summary", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].text.startswith("Published by Test Publisher {x}".format(x=inv))
                assert e.xpath("atom:title", namespaces={'atom': 'http://www.w3.org/2005/Atom'})[0].text == "Test Journal {x} ({x}000-0000)".format(x=inv)