-
-
Notifications
You must be signed in to change notification settings - Fork 42
/
importer.py
339 lines (297 loc) · 13 KB
/
importer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
# Copyright 2016-2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import logging
from odoo import _
from odoo.addons.connector.exception import MappingError
from odoo.addons.connector.components.mapper import mapping, only_create
from odoo.addons.component.core import Component
from ...components.mapper import (
iso8601_local_date, iso8601_to_utc_datetime, whenempty
)
from ...fields import MilliDatetime
_logger = logging.getLogger(__name__)
class AnalyticLineMapper(Component):
    """Map a Jira worklog record to ``jira.account.analytic.line`` values."""

    _name = 'jira.analytic.line.mapper'
    _inherit = 'jira.import.mapper'
    _apply_on = ['jira.account.analytic.line']

    direct = [
        # The line's name is required: fall back to a placeholder when
        # the Jira worklog has no comment.
        (whenempty('comment', _('missing description')), 'name'),
        (iso8601_local_date('started'), 'date'),
    ]

    @only_create
    @mapping
    def default(self, record):
        """Flag the analytic line as a timesheet entry (creation only)."""
        return {'is_timesheet': True}

    @mapping
    def issue(self, record):
        """Map the references to the linked Jira issue.

        The issue itself is provided by the importer through the
        ``linked_issue`` option; the worklog record only carries its id.
        """
        issue = self.options.linked_issue
        assert issue
        refs = {
            'jira_issue_id': record['issueId'],
            'jira_issue_key': issue['key'],
        }
        # Reuse the task mapper to compute the issue type values.
        task_mapper = self.component(
            usage='import.mapper',
            model_name='jira.project.task',
        )
        refs.update(task_mapper.issue_type(issue))
        epic_field_name = self.backend_record.epic_link_field_name
        if epic_field_name and epic_field_name in issue['fields']:
            refs['jira_epic_issue_key'] = issue['fields'][epic_field_name]
        return refs

    @mapping
    def duration(self, record):
        """Convert the time spent from seconds to hours.

        The amount is a float in Odoo: 2h30 = 2.5.
        """
        spent = float(record['timeSpentSeconds'])
        return {'unit_amount': spent / 60 / 60}

    @mapping
    def author(self, record):
        """Map the worklog author to an Odoo user and employee.

        :raises MappingError: when no Odoo user matches the Jira author,
            as the line could not be assigned to anyone.
        """
        jira_author = record['author']
        jira_author_key = jira_author['key']
        binder = self.binder_for('jira.res.users')
        user = binder.to_internal(jira_author_key, unwrap=True)
        if not user:
            email = jira_author['emailAddress']
            # Fix: a space was missing between the two sentences of the
            # message, producing '...email "x".You must create...'.
            raise MappingError(
                _('No user found with login "%s" or email "%s". '
                  'You must create a user or link it manually if the '
                  'login/email differs.') % (jira_author_key, email)
            )
        employee = self.env['hr.employee'].search(
            [('user_id', '=', user.id)],
            limit=1
        )
        # employee.id is False when the user has no employee; the field
        # is then simply left empty on the analytic line.
        return {'user_id': user.id, 'employee_id': employee.id}

    @mapping
    def project_and_task(self, record):
        """Attach the line to a task, a project, or the fallback project.

        The importer resolves these bindings beforehand and passes them
        as options; at least one of them must be set.
        """
        assert (
            self.options.task_binding or
            self.options.project_binding or
            self.options.fallback_project
        )
        task_binding = self.options.task_binding
        if not task_binding:
            # No synchronized task: link the line to a project only.
            if self.options.fallback_project:
                return {'project_id': self.options.fallback_project.id}
            project = self.options.project_binding.odoo_id
            # NOTE(review): if the project binding has no odoo_id we fall
            # through with task_binding unset and crash below — presumably
            # the assert above guarantees this cannot happen; confirm.
            if project:
                return {'project_id': project.id}
        project = task_binding.project_id
        return {'task_id': task_binding.odoo_id.id,
                'project_id': project.id}

    @mapping
    def backend_id(self, record):
        """Reference to the backend the record is imported from."""
        return {'backend_id': self.backend_record.id}
class AnalyticLineBatchImporter(Component):
    """Import the Jira worklogs.

    For every id in the list, a delayed job is created.
    Import from a date.
    """

    _name = 'jira.analytic.line.batch.importer'
    _inherit = 'jira.timestamp.batch.importer'
    _apply_on = ['jira.account.analytic.line']

    def _search(self, timestamp):
        """Return the next timestamp and an iterator on updated worklogs."""
        since = MilliDatetime.to_timestamp(timestamp.last_timestamp)
        result = self.backend_adapter.updated_since(since=since)
        to_import = self._filter_update(result.updated_worklogs)
        # We need issue_id + worklog_id for the worklog importer (the jira
        # "read" method for worklogs asks both), get it from yield_read.
        # TODO we might consider to optimize the import process here:
        # yield_read reads worklogs data, then the individual
        # import will do a request again (and 2 with the tempo module)
        return (
            MilliDatetime.from_timestamp(result.until),
            self.backend_adapter.yield_read(to_import),
        )

    def _handle_records(self, records):
        """Create one delayed import job per worklog; return the count."""
        total = 0
        for total, worklog in enumerate(records, start=1):
            self._import_record(worklog['issueId'], worklog['id'])
        return total

    def _filter_update(self, updated_worklogs):
        """Keep only the worklog ids that actually need an update.

        The Jira result carries each worklog id together with its last
        update time, so we retain only the ids whose stored sync date
        is older than the one on Jira.
        """
        if not updated_worklogs:
            return []
        self.env.cr.execute(
            "SELECT external_id, jira_updated_at "
            "FROM jira_account_analytic_line "
            "WHERE external_id IN %s ",
            (tuple(str(r.worklog_id) for r in updated_worklogs),)
        )
        # Latest "updated_at" value stored on each binding; when a
        # webhook already imported the latest version of a worklog,
        # its import can be skipped here.
        known = {
            int(external_id): updated_at
            for external_id, updated_at in self.env.cr.fetchall()
        }
        to_update = []
        for entry in updated_worklogs:
            last_known = known.get(entry.worklog_id)
            if not last_known:
                # Never imported yet (or no update date recorded).
                to_update.append(entry.worklog_id)
            elif (MilliDatetime.from_string(last_known) <
                    MilliDatetime.from_timestamp(entry.updated)):
                to_update.append(entry.worklog_id)
        return to_update

    def _import_record(self, issue_id, worklog_id, **kwargs):
        """Delay the import of one worklog."""
        delayable = self.model.with_delay(**kwargs)
        delayable.import_record(self.backend_record, issue_id, worklog_id)
class AnalyticLineImporter(Component):
    """Import one Jira worklog into a ``jira.account.analytic.line``."""

    _name = 'jira.analytic.line.importer'
    _inherit = 'jira.importer'
    _apply_on = ['jira.account.analytic.line']

    def __init__(self, work_context):
        super().__init__(work_context)
        # Jira id of the issue the worklog belongs to; set by run()
        # from the mandatory 'issue_id' keyword argument.
        self.external_issue_id = None
        # Resolved in _before_import(): the worklog is attached to a
        # task binding, else a project binding, else a fallback project.
        self.task_binding = None
        self.project_binding = None
        self.fallback_project = None

    def _get_external_updated_at(self):
        # Return the worklog's 'updated' value as a UTC datetime, or
        # None when Jira did not provide one.
        assert self.external_record
        external_updated_at = self.external_record.get('updated')
        if not external_updated_at:
            return None
        return iso8601_to_utc_datetime(external_updated_at)

    @property
    def _issue_fields_to_read(self):
        # Fields requested when reading issues during the parent climb.
        # NOTE(review): when no epic link field is configured on the
        # backend, a falsy value ends up in this list — confirm the
        # issue adapter tolerates it.
        epic_field_name = self.backend_record.epic_link_field_name
        return ['issuetype', 'project', 'parent', epic_field_name]

    def _recurse_import_task(self):
        """ Import and return the task of proper type for the worklog

        As we decide which type of issues are imported for a project,
        a worklog could be linked to an issue that we don't import.
        In that case, we climb the parents of the issue until we find
        an issue of a type we synchronize.

        It ensures that the 'to-be-linked' issue is imported and
        returns its binding (empty when no suitable parent was found).
        """
        issue_adapter = self.component(usage='backend.adapter',
                                       model_name='jira.project.task')
        issue_binder = self.binder_for('jira.project.task')
        issue_type_binder = self.binder_for('jira.issue.type')
        jira_issue_id = self.external_record['issueId']
        epic_field_name = self.backend_record.epic_link_field_name
        project_matcher = self.component(usage='jira.task.project.matcher')
        # Project of the issue the worklog was logged on; parents from a
        # different project (cross-project epics) are rejected below.
        current_project_id = self.external_issue['fields']['project']['id']
        while jira_issue_id:
            issue = issue_adapter.read(
                jira_issue_id,
                fields=self._issue_fields_to_read,
            )
            jira_project_id = issue['fields']['project']['id']
            jira_issue_type_id = issue['fields']['issuetype']['id']
            project_binding = project_matcher.find_project_binding(issue)
            issue_type_binding = issue_type_binder.to_internal(
                jira_issue_type_id
            )
            # JIRA allows to set an EPIC of a different project.
            # If it happens, we discard it.
            if (jira_project_id == current_project_id and
                    issue_type_binding.is_sync_for_project(project_binding)):
                # Found an issue of a synchronized type: stop climbing.
                break
            if issue['fields'].get('parent'):
                # 'parent' is used on sub-tasks relating to their parent task
                jira_issue_id = issue['fields']['parent']['id']
            elif issue['fields'].get(epic_field_name):
                # the epic link is set on a jira custom field
                epic_key = issue['fields'][epic_field_name]
                epic = issue_adapter.read(epic_key, fields='id')
                # we got the key of the epic issue, so we translate
                # it to the ID with a call to the API
                jira_issue_id = epic['id']
            else:
                # no parent issue of a type we are synchronizing has been
                # found, the worklog will be assigned to no task
                jira_issue_id = None
        if jira_issue_id:
            self._import_dependency(jira_issue_id, 'jira.project.task')
        return issue_binder.to_internal(jira_issue_id)

    def _create_data(self, map_record, **kwargs):
        # Pass the bindings resolved in _before_import() and the linked
        # issue to the mapper (consumed as mapper options).
        return super()._create_data(
            map_record,
            task_binding=self.task_binding,
            project_binding=self.project_binding,
            fallback_project=self.fallback_project,
            linked_issue=self.external_issue,
        )

    def _update_data(self, map_record, **kwargs):
        # Same options as for creation.
        return super()._update_data(
            map_record,
            task_binding=self.task_binding,
            project_binding=self.project_binding,
            fallback_project=self.fallback_project,
            linked_issue=self.external_issue,
        )

    def run(self, external_id, force=False, record=None, **kwargs):
        """Run the import.

        The caller must pass ``issue_id`` in the keyword arguments: the
        Jira worklog API requires both the issue id and the worklog id.
        """
        assert 'issue_id' in kwargs
        self.external_issue_id = kwargs.pop('issue_id')
        return super().run(
            external_id, force=force, record=record, **kwargs
        )

    def _handle_record_missing_on_jira(self):
        """Hook called when we are importing a record missing on Jira

        For worklogs, we drop the analytic line if we discover it doesn't exist
        on Jira, as the latter is the master.
        """
        binding = self._get_binding()
        if binding:
            # Delete both the binding and the analytic line it wraps.
            record = binding.odoo_id
            binding.unlink()
            record.unlink()
        return _('Record does no longer exist in Jira')

    def _get_external_data(self):
        """ Return the raw Jira data for ``self.external_id`` """
        issue_adapter = self.component(
            usage='backend.adapter',
            model_name='jira.project.task'
        )
        # Side effect: also fetch and store the linked issue, used later
        # by _before_import() and the mapper options.
        self.external_issue = issue_adapter.read(self.external_issue_id)
        return self.backend_adapter.read(self.external_issue_id,
                                         self.external_id)

    def _before_import(self):
        # Resolve where the worklog will be attached, in order of
        # preference: an active task, an active project, the fallback.
        task_binding = self._recurse_import_task()
        if task_binding and task_binding.active:
            self.task_binding = task_binding
        if not self.task_binding:
            # when no task exists in Odoo (because we don't synchronize
            # the issue type for instance), we link the line directly
            # to the corresponding project, not linked to any task
            issue = self.external_issue
            assert issue
            matcher = self.component(usage='jira.task.project.matcher')
            project_binding = matcher.find_project_binding(issue)
            if project_binding and project_binding.active:
                self.project_binding = project_binding
            else:
                self.fallback_project = matcher.fallback_project_for_worklogs()

    def _import(self, binding, **kwargs):
        # Skip the import entirely when there is nowhere to attach the
        # worklog (no task, no project, no fallback project).
        if not (self.task_binding or
                self.project_binding or
                self.fallback_project):
            _logger.debug(
                "No task or project synchronized for attaching worklog %s",
                self.external_record['id']
            )
            return
        return super()._import(binding, **kwargs)

    def _import_dependency_assignee(self):
        # Make sure the worklog author exists as a jira.res.users binding.
        jira_assignee = self.external_record['author']
        jira_key = jira_assignee.get('key')
        self._import_dependency(jira_key,
                                'jira.res.users',
                                record=jira_assignee)

    def _import_dependencies(self):
        """ Import the dependencies for the record"""
        self._import_dependency_assignee()