v1.py
import os
import base64
import StringIO
import urllib2
import threading
import time
# ElementTree has shipped in the stdlib since Python 2.5.
import xml.etree.ElementTree as et

# Attributes to select from the VersionOne history endpoint.
SEL_FIELDS = 'Name,ChangeDateUTC,ChangeComment,ChangeReason,ChangedBy.Name,ChangedBy.Nickname'
class Asset(object):
    '''One history entry for a VersionOne asset.'''

    def __init__(self, type, id, name, changed, reason, who):
        self.type = type
        self.id = id
        self.name = name
        self.changed = changed
        self.reason = reason
        self.who = who

    def prettyString(self):
        return '%s "%s" updated by %s on %s. reason: %s' % (
            self.type, self.name, self.who, self.changed, self.reason)
class Scanner(object):
    '''Polls the VersionOne history REST endpoint for one asset type.'''

    def __init__(self, type, v1opts):
        self.v1opts = v1opts
        self.type = type
        self.timebox = None

    def fetch(self):
        '''Fetches the raw history XML, or returns None on failure.'''
        url = (self.v1opts['host'] + '/' + self.v1opts['enterprise'] +
               '/rest-1.v1/Hist/' + self.type + '?sel=' + SEL_FIELDS)
        if self.timebox:
            url += '&where=Timebox.Name=\'' + self.timebox + '\''
        url += '&sort=ChangeDateUTC'
        req = urllib2.Request(url)
        # HTTP basic auth; encodestring appends a newline, so strip it.
        base64string = base64.encodestring(
            '%s:%s' % (self.v1opts['user'], self.v1opts['password']))[:-1]
        req.add_header('Authorization', 'Basic %s' % base64string)
        try:
            handle = urllib2.urlopen(req)
            return handle.read()
        except IOError, e:
            # URLError has no .headers attribute, so print the error itself.
            print '*** PROBLEM ***'
            print e
            return None
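    # fetch() builds a request URL shaped like the following (the host,
    # instance, and timebox values here are illustrative, not from this file):
    #   https://host/MyOrg/rest-1.v1/Hist/Story?sel=Name,ChangeDateUTC,...
    #       &where=Timebox.Name='Sprint 1'&sort=ChangeDateUTC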
    def _parseHistory(self, xml):
        '''Parses the history XML into a list of Asset objects.'''
        assets = []
        tree = et.parse(StringIO.StringIO(xml))
        for elem in tree.findall('Asset'):
            # Map each Attribute element's name to its text content.
            params = {}
            for att in elem.findall('Attribute'):
                params[att.attrib['name']] = att.text
            asset = Asset(self.type, elem.attrib['id'], params['Name'],
                          params['ChangeDateUTC'], params['ChangeReason'],
                          params['ChangedBy.Name'])
            assets.append(asset)
        return assets
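    # The parser above expects a response shaped roughly like this (a
    # hand-written sketch inferred from the code, not captured server output):
    #   <History>
    #     <Asset id="Story:1234">
    #       <Attribute name="Name">Fix login</Attribute>
    #       <Attribute name="ChangeDateUTC">2010-04-01T12:00:00</Attribute>
    #       ...
    #     </Asset>
    #   </History>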
    def setTimebox(self, timebox):
        '''Switches to a new timebox. Returns the number of pre-existing
        history items skipped while catching up.'''
        if timebox == self.timebox:
            return 0
        # First flush the cookie so the next scan starts from zero.
        if os.path.exists(self.v1opts['cookiePath']):
            os.remove(self.v1opts['cookiePath'])
        self.timebox = timebox
        # Skip everything already in the new timebox; things should be
        # good on the next scan.
        skipped = self.catchUp()
        return skipped
    def scan(self, maxNew=10):
        '''Returns just the new items since the last scan.'''
        if not self.timebox:
            return []
        # The cookie file holds a single integer: how many history items
        # have already been reported.
        existingCount = 0
        if os.path.exists(self.v1opts['cookiePath']):
            existingCount = int(open(self.v1opts['cookiePath'], 'r').read())
        xml = self.fetch()
        if xml is None:
            return []
        history = self._parseHistory(xml)
        # Lop off the ones we have already seen.
        history = history[existingCount:]
        # Trim the ones past maxNew.
        if len(history) > maxNew:
            print 'trimming history from %d to %d' % (len(history), maxNew)
            history = history[:maxNew]
        f = open(self.v1opts['cookiePath'], 'w')
        f.write(str(len(history) + existingCount))
        f.close()
        if len(history) > 0:
            print 'skipping %d history items' % existingCount
            print '%s %d new items' % (self.type, len(history))
        return history
    def catchUp(self):
        '''Scans in large batches until no new items remain, so a fresh
        cookie does not replay the entire history.'''
        count = 0
        scan_count = len(self.scan(1000))
        while scan_count > 0:
            count += scan_count
            scan_count = len(self.scan(1000))
        print '%s caught up by skipping %d' % (self.type, count)
        return count
class ScanThread(threading.Thread):
    '''Polls a Scanner forever, writing pretty-printed updates to an
    outputter (any object with a puts(line) method).'''

    def __init__(self, scanner, outputter):
        threading.Thread.__init__(self)
        self.scanner = scanner
        self.outputter = outputter

    def run(self):
        while True:
            updates = self.scanner.scan()
            for asset in updates:
                self.outputter.puts(asset.prettyString())
            time.sleep(10)
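# A minimal wiring sketch, not part of the original file: the v1opts keys
# match those referenced above, but the host, enterprise, credentials,
# timebox name, and ConsoleOutputter are hypothetical stand-ins.
class ConsoleOutputter(object):
    def puts(self, line):
        print line

if __name__ == '__main__':
    v1opts = {
        'host': 'https://www1.v1host.com',  # hypothetical host
        'enterprise': 'MyOrg',              # hypothetical instance name
        'user': 'admin',
        'password': 'secret',
        'cookiePath': '/tmp/v1scan.cookie',
    }
    scanner = Scanner('Story', v1opts)
    scanner.setTimebox('Sprint 1')          # skip existing history first
    ScanThread(scanner, ConsoleOutputter()).start()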