From c808faa992b07baef8cbeddcc03e2c825f28acc8 Mon Sep 17 00:00:00 2001 From: Simone Orsi Date: Tue, 9 May 2017 12:11:23 +0200 Subject: [PATCH 01/31] [ADD] connector_importer --- connector_importer/__init__.py | 7 + connector_importer/__manifest__.py | 36 ++ connector_importer/backends.py | 13 + connector_importer/controllers/__init__.py | 3 + connector_importer/controllers/main.py | 25 + connector_importer/data/example_file.xml | 17 + .../data/import_type_example.xml | 17 + connector_importer/data/ir_cron.xml | 17 + connector_importer/events.py | 29 ++ connector_importer/importer/__init__.py | 1 + connector_importer/importer/base_importer.py | 459 ++++++++++++++++++ connector_importer/importer/base_mapper.py | 69 +++ connector_importer/log.py | 26 + connector_importer/menuitems.xml | 48 ++ connector_importer/models/__init__.py | 6 + connector_importer/models/backend.py | 226 +++++++++ connector_importer/models/import_type.py | 36 ++ connector_importer/models/job_mixin.py | 48 ++ connector_importer/models/record.py | 96 ++++ connector_importer/models/recordset.py | 270 +++++++++++ connector_importer/models/reporter.py | 216 +++++++++ connector_importer/models/source.py | 245 ++++++++++ .../security/ir.model.access.csv | 8 + connector_importer/security/security.xml | 11 + connector_importer/tests/test_all.py | 4 + connector_importer/tests/test_backend.py | 18 + connector_importer/utils/__init__.py | 0 connector_importer/utils/importer_utils.py | 127 +++++ connector_importer/utils/mapper_utils.py | 291 +++++++++++ connector_importer/utils/misc.py | 15 + connector_importer/utils/report_html.py | 140 ++++++ connector_importer/views/docs_template.xml | 109 +++++ .../views/import_backend_views.xml | 102 ++++ .../views/import_recordset_views.xml | 109 +++++ .../views/import_source_views.xml | 34 ++ .../views/import_user_views.xml | 51 ++ connector_importer/views/report_template.xml | 46 ++ .../views/source_config_template.xml | 12 + connector_importer/wizards/__init__.py | 2 + connector_importer/wizards/base.py | 67 +++ connector_importer/wizards/base.xml | 21 + .../wizards/products/__init__.py | 2 + .../products/wizard_import_products.py | 31 ++ .../products/wizard_import_products.xml | 45 ++ 44 files changed, 3155 insertions(+) create mode 100644 connector_importer/__init__.py create mode 100644 connector_importer/__manifest__.py create mode 100644 connector_importer/backends.py create mode 100644 connector_importer/controllers/__init__.py create mode 100644 connector_importer/controllers/main.py create mode 100644 connector_importer/data/example_file.xml create mode 100644 connector_importer/data/import_type_example.xml create mode 100644 connector_importer/data/ir_cron.xml create mode 100644 connector_importer/events.py create mode 100644 connector_importer/importer/__init__.py create mode 100644 connector_importer/importer/base_importer.py create mode 100644 connector_importer/importer/base_mapper.py create mode 100644 connector_importer/log.py create mode 100644 connector_importer/menuitems.xml create mode 100644 connector_importer/models/__init__.py create mode 100644 connector_importer/models/backend.py create mode 100644 connector_importer/models/import_type.py create mode 100644 connector_importer/models/job_mixin.py create mode 100644 connector_importer/models/record.py create mode 100644 connector_importer/models/recordset.py create mode 100644 connector_importer/models/reporter.py create mode 100644 connector_importer/models/source.py create mode 100644 
connector_importer/security/ir.model.access.csv create mode 100644 connector_importer/security/security.xml create mode 100644 connector_importer/tests/test_all.py create mode 100644 connector_importer/tests/test_backend.py create mode 100644 connector_importer/utils/__init__.py create mode 100644 connector_importer/utils/importer_utils.py create mode 100644 connector_importer/utils/mapper_utils.py create mode 100644 connector_importer/utils/misc.py create mode 100644 connector_importer/utils/report_html.py create mode 100644 connector_importer/views/docs_template.xml create mode 100644 connector_importer/views/import_backend_views.xml create mode 100644 connector_importer/views/import_recordset_views.xml create mode 100644 connector_importer/views/import_source_views.xml create mode 100644 connector_importer/views/import_user_views.xml create mode 100644 connector_importer/views/report_template.xml create mode 100644 connector_importer/views/source_config_template.xml create mode 100644 connector_importer/wizards/__init__.py create mode 100644 connector_importer/wizards/base.py create mode 100644 connector_importer/wizards/base.xml create mode 100644 connector_importer/wizards/products/__init__.py create mode 100644 connector_importer/wizards/products/wizard_import_products.py create mode 100644 connector_importer/wizards/products/wizard_import_products.xml diff --git a/connector_importer/__init__.py b/connector_importer/__init__.py new file mode 100644 index 000000000..ecf63c28f --- /dev/null +++ b/connector_importer/__init__.py @@ -0,0 +1,7 @@ + +from . import backends +from . import models +from . import importer +# TODO +# from . import wizards +from . import controllers diff --git a/connector_importer/__manifest__.py b/connector_importer/__manifest__.py new file mode 100644 index 000000000..2db5349c5 --- /dev/null +++ b/connector_importer/__manifest__.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +############################################################################## +# +# Copyright (C) 2017 Camptocamp +# +############################################################################## +{ + 'name': 'Connector Importer', + 'description': """This module takes care of import sessions.""", + 'version': '10.0.1.0.0', + 'depends': [ + 'connector', + ], + 'author': 'Camptocamp', + 'license': 'AGPL-3', + 'category': 'Uncategorized', + 'website': 'http://www.camptocamp.com', + 'data': [ + 'data/ir_cron.xml', + 'security/security.xml', + 'security/ir.model.access.csv', + 'views/import_backend_views.xml', + 'views/import_recordset_views.xml', + 'views/import_source_views.xml', + 'views/report_template.xml', + 'views/docs_template.xml', + 'views/source_config_template.xml', + # TODO + # 'views/import_user_views.xml', + # TODO + # 'wizards/base.xml', + # 'wizards/products/wizard_import_products.xml', + 'menuitems.xml', + ], + 'external_dependencies': {'python': ['chardet']}, +} diff --git a/connector_importer/backends.py b/connector_importer/backends.py new file mode 100644 index 000000000..7664097d1 --- /dev/null +++ b/connector_importer/backends.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
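+ #
+ # Connector backend declarations. Concrete ConnectorUnits (importers,
+ # mappers) register themselves against `import_backend` with the
+ # `@import_backend` class decorator (see `importer/base_importer.py`);
+ # `import_backend_default` pins the default 1.0 version.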
+ +import openerp.addons.connector.backend as backend + + +import_backend = backend.Backend('import_backend') +""" Generic Import Backend """ + +import_backend_default = backend.Backend(parent=import_backend, version='1.0') +""" Import backend for version 1.0 """ diff --git a/connector_importer/controllers/__init__.py b/connector_importer/controllers/__init__.py new file mode 100644 index 000000000..65a8c1201 --- /dev/null +++ b/connector_importer/controllers/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- + +from . import main diff --git a/connector_importer/controllers/main.py b/connector_importer/controllers/main.py new file mode 100644 index 000000000..054ed9ab5 --- /dev/null +++ b/connector_importer/controllers/main.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo import http +from odoo.http import request +# import werkzeug +from ..utils.report_html import Reporter + + +class ReportController(http.Controller): + """Controller for display import reports.""" + + @http.route( + '/importer/import-recordset/', + type='http', auth="user", website=False) + def full_report(self, recordset, **kwargs): + reporter = Reporter(recordset.jsondata, detailed=1) + values = { + 'recordset': recordset, + 'report': reporter.html(wrapped=0), + + } + return request.render("connector_importer.recordset_report", values) diff --git a/connector_importer/data/example_file.xml b/connector_importer/data/example_file.xml new file mode 100644 index 000000000..db21070fe --- /dev/null +++ b/connector_importer/data/example_file.xml @@ -0,0 +1,17 @@ + + + + + + <-- + + Y29udGVudCBmcm9tIHNldHVwIHNvbmc=\n + foo.csv + foo.csv + + --> + + diff --git a/connector_importer/data/import_type_example.xml b/connector_importer/data/import_type_example.xml new file mode 100644 index 000000000..e8613de17 --- /dev/null +++ b/connector_importer/data/import_type_example.xml @@ -0,0 +1,17 @@ + + + + + + + + + diff --git a/connector_importer/data/ir_cron.xml b/connector_importer/data/ir_cron.xml new file mode 100644 index 000000000..0c2adc3f5 --- /dev/null +++ b/connector_importer/data/ir_cron.xml @@ -0,0 +1,17 @@ + + + + + Importer backend: cleanup old recordsets + + + 1 + weeks + -1 + + import.backend + cron_cleanup_recordsets + () + + + diff --git a/connector_importer/events.py b/connector_importer/events.py new file mode 100644 index 000000000..110c17db1 --- /dev/null +++ b/connector_importer/events.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo.addons.connector.event import Event + +chunk_finished_event = Event() + + +@chunk_finished_event +def chunk_finished_subscriber(env, dest_model_name, last_record): + """Run `import_record_after_all` after last record has been imported.""" + if not last_record.job_id: + # ok... we are not running in cron mode..my job here has finished! 
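+         # (in debug mode records are imported synchronously and their
+         # `job_id` is reset afterwards, so there is no queue state to
+         # inspect here)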
+ return + # TODO + # backend = last_record.backend_id + # recordset = last_record.recordset_id + # other_records_completed = [ + # r.job_id.state == 'done' + # for r in recordset.record_ids + # if r != last_record + # ] + # if all(other_records_completed): + # job_method = last_record.with_delay().import_record_after_all + # if backend.debug_mode(): + # job_method = last_record.import_record_after_all + # job_method(last_record_id=record_id) diff --git a/connector_importer/importer/__init__.py b/connector_importer/importer/__init__.py new file mode 100644 index 000000000..342e818ff --- /dev/null +++ b/connector_importer/importer/__init__.py @@ -0,0 +1 @@ +from . import base_importer diff --git a/connector_importer/importer/base_importer.py b/connector_importer/importer/base_importer.py new file mode 100644 index 000000000..0e2d99766 --- /dev/null +++ b/connector_importer/importer/base_importer.py @@ -0,0 +1,459 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo.addons.connector.unit.synchronizer import Importer +from odoo import fields + +from ..backends import import_backend +from ..log import logger +from ..events import chunk_finished_event + + +@import_backend +class BaseImporter(Importer): + _model_name = '' + + +@import_backend +class RecordSetImporter(BaseImporter): + """Base importer for recordsets.""" + + _model_name = 'import.recordset' + + def run(self, recordset, **kw): + # update recordset report + recordset.set_report({ + '_last_start': fields.Datetime.now(), + }, reset=True) + msg = 'START RECORDSET {0}({1})'.format(recordset.name, + recordset.id) + logger.info(msg) + + record_model = recordset.record_ids + + source = recordset.get_source() + for chunk in source.get_lines(): + # create chuncked records and run their imports + record = record_model.create({'recordset_id': recordset.id}) + # store data + record.set_data(chunk) + record.run_import() + + +class ChunkReport(dict): + """A smarter dict for chunk data.""" + + report_keys = ( + 'created', + 'updated', + 'errored', + 'skipped', + ) + + def __init__(self, **kwargs): + super(ChunkReport, self).__init__(**kwargs) + for k in self.report_keys: + self[k] = [] + + def track_error(self, item): + self['errored'].append(item) + + def track_skipped(self, item): + self['skipped'].append(item) + + def track_updated(self, item): + self['updated'].append(item) + + def track_created(self, item): + self['created'].append(item) + + def counters(self): + res = {} + for k, v in self.iteritems(): + res[k] = len(v) + return res + + +class OdooRecordMixin(object): + + _model_name = '' + unique_key = '' + + def find_domain(self, values, orig_values): + return [(self.unique_key, '=', values[self.unique_key])] + + def find(self, values, orig_values): + """Find any existing item.""" + item = self.model.search( + self.find_domain(values, orig_values), + order='create_date desc', limit=1) + return item + + def exists(self, values, orig_values): + """Return true if the items exists.""" + return bool(self.find(values, orig_values)) + + def default_values(self): + """Values that are automatically assigned.""" + return self.mapper.default_values() + + def translatable_keys(self, create=False): + """Keys that are translatable.""" + return self.mapper.translatable_keys() + + def translatable_langs(self): + return self.env['res.lang'].search([ + ('translatable', '=', True)]).mapped('code') + + def make_translation_key(self, key, lang): + 
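+         # eg: ('name', 'fr_FR') -> 'name:fr_FR', the column naming
+         # convention expected in the source file
+         # (see `collect_translatable` below)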
return u'{}:{}'.format(key, lang) + + def collect_translatable(self, values, orig_values): + """Get translations values for `mapper.translatable_keys`. + + We assume that the source contains extra columns in the form: + + `mapper_key:lang` + + whereas `mapper_key` is an odoo record field to translate + and lang matches one of the installed languages. + + Translatable keys must be declared on the mapper + within the attribute `translatable`. + """ + translatable = {} + if not self.translatable_keys(): + return translatable + for lang in self.translatable_langs(): + for key in self.translatable_keys(): + # eg: name:fr_FR + tkey = self.make_translation_key(key, lang) + if tkey in orig_values and values.get(key): + if lang not in translatable: + translatable[lang] = {} + # we keep only translation for existing values + translatable[lang][key] = orig_values.get(tkey) + return translatable + + def update_translations(self, odoo_record, translatable, ctx=None): + ctx = ctx or {} + for lang, values in translatable.iteritems(): + odoo_record.with_context( + lang=lang, **self.write_context()).write(values) + + def pre_create(self, values, orig_values): + """Do some extra stuff before creating a missing object.""" + pass + + def post_create(self, odoo_record, values, orig_values): + """Do some extra stuff after creating a missing object.""" + pass + + def create_context(self): + """Inject context variables on create.""" + return {} + + def create(self, values, orig_values): + """Create a new odoo record.""" + self.pre_create(values, orig_values) + # TODO: remove keys that are not model's fields + odoo_record = self.model.with_context( + **self.create_context()).create(values) + self.post_create(odoo_record, values, orig_values) + translatable = self.collect_translatable(values, orig_values) + self.update_translations(odoo_record, translatable) + return odoo_record + + def pre_write(self, odoo_record, values, orig_values): + """Do some extra stuff before updating an existing object.""" + pass + + def post_write(self, odoo_record, values, orig_values): + """Do some extra stuff after updating an existing object.""" + pass + + def write_context(self): + """Inject context variables on write.""" + return {} + + def write(self, values, orig_values): + """Update an existing odoo record.""" + # TODO: add a checkpoint? log something? 
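+         # `values` carries no record id: the target record is resolved
+         # again via `find()` on the unique key domain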
+ odoo_record = self.find(values, orig_values) + self.pre_write(odoo_record, values, orig_values) + # TODO: remove keys that are not model's fields + odoo_record.with_context(**self.write_context()).write(values) + self.post_write(odoo_record, values, orig_values) + translatable = self.collect_translatable(values, orig_values) + self.update_translations(odoo_record, translatable) + return odoo_record + + +class TrackingMixin(object): + + _model_name = '' + _chunk_report = None + + @property + def chunk_report(self): + if not self._chunk_report: + self._chunk_report = ChunkReport() + return self._chunk_report + + def chunk_report_item(self, line, odoo_record=None, message=''): + return { + 'line_nr': line['_line_nr'], + 'message': message, + 'model': self._model_name, + 'odoo_record': odoo_record.id if odoo_record else None, + } + + def _log(self, msg, line=None, level='info'): + handler = getattr(self._logger, level) + msg = u'{prefix}{line}[model: {model}] {msg}'.format( + prefix=self._log_prefix, + line='[line: {}]'.format(line['_line_nr']) if line else '', + model=self._model_name, + msg=msg + ) + handler(msg) + + def _log_updated(self, values, line, odoo_record, message=''): + self._log('UPDATED [id: {}]'.format(odoo_record.id), line=line) + self.chunk_report.track_updated(self.chunk_report_item( + line, odoo_record=odoo_record, message=message + )) + + def _log_error(self, values, line, odoo_record, message=''): + if isinstance(message, Exception): + message = str(message) + self._log(message, line=line, level='error') + self.chunk_report.track_error(self.chunk_report_item( + line, odoo_record=odoo_record, message=message + )) + + def _log_created(self, values, line, odoo_record, message=''): + self._log('CREATED [id: {}]'.format(odoo_record.id), line=line) + self.chunk_report.track_created(self.chunk_report_item( + line, odoo_record=odoo_record, message=message + )) + + def _log_skipped(self, values, line, skip_info): + # `skip_it` could contain a msg + self._log('SKIPPED ' + skip_info.get('message'), + line=line, level='warn') + + item = self.chunk_report_item(line) + item.update(skip_info) + self.chunk_report.track_skipped(item) + + def _prepare_report(self, previous): + # init a new report + report = ChunkReport() + # merge previous and current + for k, v in report.iteritems(): + prev = previous.get(self._model_name, {}).get(k, []) + report[k] = prev + self.chunk_report[k] + return report + + +@import_backend +class RecordImporter(BaseImporter, OdooRecordMixin, TrackingMixin): + """Base importer for records.""" + + # _base_mapper = '' + _model_name = '' + # log and report errors + # do not make the whole import fail + _break_on_error = False + + def required_keys(self, create=False): + """Keys that are mandatory to import a line.""" + req = self.mapper.required_keys() + all_values = [] + for k, v in req.iteritems(): + # make sure values are always tuples + # as we support multiple dest keys + if not isinstance(v, (tuple, list)): + req[k] = (v, ) + all_values.extend(req[k]) + if (self.unique_key and + self.unique_key not in req.keys() and + self.unique_key not in all_values): + # this one is REALLY required :) + req[self.unique_key] = (self.unique_key, ) + return req + + def _check_missing(self, source_key, dest_key, values, orig_values): + missing = (not source_key.startswith('__') and + orig_values.get(source_key) is None) + if missing: + msg = 'MISSING REQUIRED SOURCE KEY={}'.format(source_key) + if self.unique_key and values.get(self.unique_key): + msg += ': {}={}'.format( + 
self.unique_key, values[self.unique_key]) + return { + 'message': msg, + } + missing = (not dest_key.startswith('__') and + values.get(dest_key) is None) + if missing: + msg = 'MISSING REQUIRED DESTINATION KEY={}'.format(dest_key) + if self.unique_key and values.get(self.unique_key): + msg += ': {}={}'.format( + self.unique_key, values[self.unique_key]) + return { + 'message': msg, + } + return False + + def skip_it(self, values, orig_values): + """Skip item import conditionally... if you want ;). + + You can return back `False` to not skip + or a dictionary containing info about skip reason. + """ + msg = '' + required = self.required_keys() + for source_key, dest_key in required.iteritems(): + # we support multiple destination keys + for _dest_key in dest_key: + missing = self._check_missing( + source_key, _dest_key, values, orig_values) + if missing: + return missing + + if self.exists(values, orig_values) \ + and not self.recordset.override_existing: + msg = 'ALREADY EXISTS' + if self.unique_key: + msg += ': {}={}'.format( + self.unique_key, values[self.unique_key]) + return { + 'message': msg, + 'odoo_record': self.find(values, orig_values).id, + } + return False + + def cleanup_line(self, line): + """Apply basic cleanup on lines.""" + # we cannot alter dict keys while iterating + res = {} + for k, v in line.iteritems(): + if not k.startswith('_'): + k = self.clean_line_key(k) + if isinstance(v, basestring): + v = v.strip() + res[k] = v + return res + + def clean_line_key(self, key): + """Clean record key. + + Sometimes your CSV source do not have proper keys, + they can contain a lot of crap or they can change + lower/uppercase from import to import. + You can override this method to normalize keys + and make your import mappers work reliably. + """ + return key.strip() + + def prepare_line(self, line): + """Pre-manipulate a line if needed.""" + pass + + def _init(self, recordset): + self.recordset = recordset + self.backend = self.recordset.backend_id + self._log_prefix = self.recordset.import_type_id.key + ' ' + self._logger = logger + + def _do_report(self): + previous = self.recordset.get_report() + report = self._prepare_report(previous) + self.recordset.set_report({self._model_name: report}) + + def _record_lines(self): + return self.record.get_data() + + def _load_mapper_options(self): + return { + 'override_existing': self.recordset.override_existing + } + + def run(self, record, **kw): + """Run the import machinery!""" + + self.record = record + if not self.record: + # maybe deleted??? + msg = 'NO RECORD FOUND, maybe deleted? Check your jobs!' 
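+             # (old recordsets - and their records - can be dropped by
+             # `import.backend.cron_cleanup_recordsets` while their jobs
+             # are still sitting in the queue)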
+ logger.error(msg) + return + + self._init(self.record.recordset_id) + + mapper_options = self._load_mapper_options() + + for line in self._record_lines(): + line = self.cleanup_line(line) + self.prepare_line(line) + + odoo_record = None + + try: + with self.env.cr.savepoint(): + values = self.mapper.map_record(line).values( + **mapper_options) + except Exception, err: + values = {} + self._log_error(values, line, odoo_record, message=err) + if self._break_on_error: + raise + continue + + # handle forced skipping + skip_info = self.skip_it(values, line) + if skip_info: + self._log_skipped(values, line, skip_info) + continue + + try: + with self.env.cr.savepoint(): + if self.exists(values, line): + odoo_record = self.write(values, line) + self._log_updated(values, line, odoo_record) + else: + odoo_record = self.create(values, line) + self._log_created(values, line, odoo_record) + except Exception, err: + self._log_error(values, line, odoo_record, message=err) + if self._break_on_error: + raise + continue + + # update report + self._do_report() + + # log chunk finished + msg = ' '.join([ + 'CHUNK FINISHED', + '[created: {created}]', + '[updated: {updated}]', + '[skipped: {skipped}]', + '[errored: {errored}]', + ]).format(**self.chunk_report.counters()) + self._log(msg) + + chunk_finished_event.fire( + self.env, self.model._name, self.record) + + def after_all(self, recordset): + """Get something done after all the children jobs have completed. + + This should be triggered by `chunk_finished_event`. + """ + # TODO: needed for logger and other stuff. Can be simplified. + self._init(recordset) diff --git a/connector_importer/importer/base_mapper.py b/connector_importer/importer/base_mapper.py new file mode 100644 index 000000000..5d38dc8a7 --- /dev/null +++ b/connector_importer/importer/base_mapper.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + + +from openerp.addons.connector.unit.mapper import ImportMapper, mapping + + +class BaseImportMapper(ImportMapper): + + required = { + # source key: dest key + # You can declare here the keys the importer must have + # to import a record. + # `source key` means a key in the source record + # either a line in a csv file or a lien from an sql table. + # `dest key` is the destination the for the source one. + + # Eg: in your mapper you could have a mapping like + # direct = [ + # ('title', 'name'), + # (concat(('title', 'foo', ), separator=' - '), 'baz'), + # ] + # You want the record to be skipped if: + # 1. title or name are not valued in the source + # 2. title is valued but the conversion gives an empty value for name + # 3. title or foo are not valued in the source + # 4. title and foo are valued but the conversion + # gives an empty value for baz + + # You can achieve this like: + # required = { + # 'title': ('name', 'baz'), + # 'foo': 'baz', + # } + + # If you want to check only the source or the destination key + # use the same and prefix in w/ double underscore, like: + + # {'__foo': 'baz', 'foo': '__baz'} + } + + def required_keys(self, create=False): + """Return required keys for this mapper. + + The importer can use this to determine if a line + has to be skipped. + """ + return self.required + + translatable = [] + + def translatable_keys(self, create=False): + """Return translatable keys for this mapper. 
+ + The importer can use this to translate specific fields + if the are found in the csv in the form `field_name:lang_code`. + """ + return self.translatable + + defaults = [ + # odoo field, value + # ('sale_ok', True), + ] + + @mapping + def default_values(self, record=None): + return dict(self.defaults) diff --git a/connector_importer/log.py b/connector_importer/log.py new file mode 100644 index 000000000..773321fbc --- /dev/null +++ b/connector_importer/log.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +import os +import logging +from logging.handlers import RotatingFileHandler + +logger = logging.getLogger('[importer]') +logger.setLevel(logging.INFO) + +if os.environ.get('IMPORTER_LOG_PATH'): + # use separated log file when developing + FNAME = 'importer.log' + + base_path = os.environ.get('IMPORTER_LOG_PATH') + if not os.path.exists(base_path): + os.makedirs(base_path) + + # add a rotating handler + handler = RotatingFileHandler(base_path + '/' + FNAME, + maxBytes=1024 * 5, + backupCount=5) + logger.addHandler(handler) + logging.info('logging to {}'.format(base_path + '/' + FNAME)) diff --git a/connector_importer/menuitems.xml b/connector_importer/menuitems.xml new file mode 100644 index 000000000..f0a81bc27 --- /dev/null +++ b/connector_importer/menuitems.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + + diff --git a/connector_importer/models/__init__.py b/connector_importer/models/__init__.py new file mode 100644 index 000000000..520870497 --- /dev/null +++ b/connector_importer/models/__init__.py @@ -0,0 +1,6 @@ +from . import backend +from . import import_type +from . import source +from . import recordset +from . import record +from . import reporter diff --git a/connector_importer/models/backend.py b/connector_importer/models/backend.py new file mode 100644 index 000000000..febcfecc6 --- /dev/null +++ b/connector_importer/models/backend.py @@ -0,0 +1,226 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from contextlib import contextmanager +from odoo.addons.connector.connector import ConnectorEnvironment +from odoo import models, fields, api, exceptions, _ +import logging + +cleanup_logger = logging.getLogger('[recordset-cleanup]') + +BACKEND_VERSIONS = [ + ('1.0', 'Version 1.0'), +] + + +class ImportBackend(models.Model): + _name = 'import.backend' + _description = 'Import Backend' + _inherit = 'connector.backend' + _backend_type = 'import_backend' + + @contextmanager + @api.multi + def get_environment(self, model_name): + self.ensure_one() + yield ConnectorEnvironment(self, model_name) + + @api.model + def _select_version(self): + """ Available versions + + Can be inherited to add custom versions. 
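+
+         Eg, in an inheriting model::
+
+             def _select_version(self):
+                 res = super(MyBackend, self)._select_version()
+                 return res + [('2.0', 'Version 2.0')]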
+ """ + return BACKEND_VERSIONS + + @api.model + def _select_interval_type(self): + return [ + ('hours', 'Hours'), + ('work_days', 'Work Days'), + ('days', 'Days'), + ('weeks', 'Weeks'), + ('months', 'Months') + ] + + version = fields.Selection( + selection='_select_version', + string='Version', + required=True, + ) + recordset_ids = fields.One2many( + 'import.recordset', + 'backend_id', + string='Record Sets', + ) + # cron stuff + cron_mode = fields.Boolean('Cron mode?') + cron_start_date = fields.Datetime( + 'Start date', + ) + cron_interval_number = fields.Integer('Interval number') + cron_interval_type = fields.Selection( + selection='_select_interval_type', + string='Interval type', + ) + cron_id = fields.Many2one( + 'ir.cron', + string='Related cron', + domain=lambda self: [('model', '=', self._name)], + ) + cron_master_recordset_id = fields.Many2one( + 'import.recordset', + string='Master recordset', + help=('If an existing recordset is selected ' + 'it will be used to create a new recordset ' + 'each time the cron runs. ' + '\nIn this way you can keep every import session isolated. ' + '\nIf none, all recordsets will run.') + ) + cron_cleanup_keep = fields.Integer( + string='Cron cleanup keep', + help=('If this value is greater than 0 ' + 'a cron will cleanup old recordsets ' + 'and keep only the latest N records matching this value.'), + ) + notes = fields.Text('Notes') + debug_mode = fields.Boolean( + 'Debug mode?', + help=_("Enabling debug mode causes the import to run " + "in real time, without using any job queue. " + "Make sure you don't do this in production!") + ) + job_running = fields.Boolean( + 'Job running', + compute='_compute_job_running', + help=_("Tells you if a job is running for this backend."), + readonly=True + ) + enable_user_mode = fields.Boolean( + 'Enable user mode', + default=True, + help=_("Enabling user mode allows simple users " + "to use the quick wizard for importing recordsets on demand.") + ) + + @api.model + def get_cron_vals(self, backend=None): + backend = backend or self + return { + 'name': 'Cron for import backend %s' % backend.name, + 'model': backend._name, + 'function': 'run_all', + 'args': '(%s,)' % str(backend.id), + 'interval_number': backend.cron_interval_number, + 'interval_type': backend.cron_interval_type, + 'nextcall': backend.cron_start_date, + } + + def _update_or_create_cron(self): + """Update or create cron record if needed.""" + if self.cron_mode: + cron_model = self.env['ir.cron'] + cron_vals = self.get_cron_vals() + if not self.cron_id: + self.cron_id = cron_model.create(cron_vals) + else: + self.cron_id.write(cron_vals) + + @api.model + def create(self, vals): + """ handle cron stuff + """ + backend = super(ImportBackend, self).create(vals) + backend._update_or_create_cron() + return backend + + @api.multi + def write(self, vals): + """ handle cron stuff + """ + res = super(ImportBackend, self).write(vals) + for backend in self: + backend._update_or_create_cron() + return res + + @api.multi + def unlink(self): + for item in self: + item.check_delete() + return super(ImportBackend, self).unlink() + + @api.model + def check_delete(self): + """ if debug mode is not ON check that we don't have + any jobs related to our sub records. 
+ """ + if not self.debug_mode and self.job_running: + raise exceptions.Warning(_('You must complete the job first!')) + + @api.multi + def _compute_job_running(self): + for item in self: + running = False + for recordset in self.recordset_ids: + if recordset.has_job() and not recordset.job_done(): + running = True + break + for record in recordset.record_ids: + if record.has_job() and not record.job_done(): + running = True + break + item.job_running = running + + @api.model + def run_all(self, backend_id=None): + """ run all recordset imports + """ + backend = backend_id and self.browse(backend_id) or self + backend.ensure_one() + recordsets = backend.recordset_ids + if backend.cron_master_recordset_id: + # clone and use it to run + recordsets = backend.cron_master_recordset_id.copy() + for recordset in recordsets: + recordset.run_import() + + @api.model + def cron_cleanup_recordsets(self): + cleanup_logger.info('Looking for recorsets to cleanup.') + backends = self.search([('cron_cleanup_keep', '>', 0)]) + to_clean = self.env['import.recordset'] + for backend in backends: + if len(backend.recordset_ids) <= backend.cron_cleanup_keep: + continue + to_keep = backend.recordset_ids.sorted( + lambda x: x.create_date, + reverse=True + )[:backend.cron_cleanup_keep] + # always keep this + to_keep |= backend.cron_master_recordset_id + to_clean = backend.recordset_ids - to_keep + if to_clean: + msg = 'Cleaning up {}'.format(','.join(to_clean.mapped('name'))) + cleanup_logger.info(msg) + to_clean.unlink() + else: + cleanup_logger.info('Nothing to do.') + + @api.multi + def button_complete_jobs(self): + """ set all jobs to "completed" state. + """ + self.ensure_one() + for recordset in self.recordset_ids: + for record in recordset.record_ids: + if record.has_job() and not record.job_done(): + record.job_id.button_done() + if recordset.has_job() and not recordset.job_done(): + recordset.job_id.button_done() + + @api.onchange('enable_user_mode') + def _onchange_enable_user_mode(self): + """If user mode is enabled we want to run it with jobs by default.""" + self.debug_mode = not self.enable_user_mode diff --git a/connector_importer/models/import_type.py b/connector_importer/models/import_type.py new file mode 100644 index 000000000..2e85215bf --- /dev/null +++ b/connector_importer/models/import_type.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo import models, fields, api + + +class ImportType(models.Model): + _name = 'import.type' + _description = 'Import type' + + name = fields.Char(required=True) + key = fields.Char(required=True) + settings = fields.Text( + string='Settings', + required=True, + help=""" + # comment me + product.template:dotted.path.to.importer + product.product:dotted.path.to.importer + # another one + product.supplierinfo:dotted.path.to.importer + """ + ) + # TODO: provide default source and configuration policy + # for an import type to ease bootstrapping recordsets from UI. 
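+     #
+     # Settings are parsed line by line by `available_models` below.
+     # Eg (the dotted path is illustrative):
+     #   product.product:odoo.addons.my_addon.importers.ProductImporter
+     # yields the pair
+     #   ('product.product', 'odoo.addons.my_addon.importers.ProductImporter')
+     #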
+ # default_source_model_id = fields.Many2one() + + @api.multi + def available_models(self): + self.ensure_one() + for line in self.settings.strip().splitlines(): + if line.strip() and not line.startswith('#'): + model, importer = line.split(':') + yield (model, importer) diff --git a/connector_importer/models/job_mixin.py b/connector_importer/models/job_mixin.py new file mode 100644 index 000000000..0ddf6b41f --- /dev/null +++ b/connector_importer/models/job_mixin.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +from odoo import fields, api, exceptions, _ + +from odoo.addons.queue_job.job import DONE, STATES + + +class JobRelatedMixin(object): + """Mixin klass for queue.job relationship. + + We do not use an abstract model to be able to not re-define + the relation on each inheriting model. + """ + + job_id = fields.Many2one( + 'queue.job', + string='Job', + readonly=True, + ) + job_state = fields.Selection( + STATES, + string='Job State', + readonly=True, + index=True, + related='job_id.state' + ) + + @api.model + def has_job(self): + return bool(self.job_id) + + @api.model + def job_done(self): + return self.job_state == DONE + + @api.model + def check_delete(self): + if self.has_job() and not self.job_done(): + raise exceptions.Warning(_('You must complete the job first!')) + + @api.multi + def unlink(self): + for item in self: + item.check_delete() + return super(JobRelatedMixin, self).unlink() diff --git a/connector_importer/models/record.py b/connector_importer/models/record.py new file mode 100644 index 000000000..f06df26b4 --- /dev/null +++ b/connector_importer/models/record.py @@ -0,0 +1,96 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
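+ #
+ # An `import.record` holds one chunk of source lines (as JSON) and
+ # runs the actual import for each model of the import type through a
+ # queue job - or synchronously when the backend is in debug mode.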
+ +import json +import os + +from odoo import models, fields, api +from odoo.addons.queue_job.job import job + +from .recordset import get_record_importer +from .job_mixin import JobRelatedMixin +from ..log import logger + + +class ImportRecord(models.Model, JobRelatedMixin): + _name = 'import.record' + _description = 'Import record' + _order = 'date DESC' + _backend_type = 'import_backend' + + date = fields.Datetime( + 'Import date', + default=fields.Date.context_today, + ) + jsondata = fields.Text('JSON Data') + recordset_id = fields.Many2one( + 'import.recordset', + string='Recordset' + ) + backend_id = fields.Many2one( + 'import.backend', + string='Backend', + related='recordset_id.backend_id', + readonly=True, + ) + + @api.multi + def unlink(self): + # inheritance of non-model mixin does not work w/out this + return super(ImportRecord, self).unlink() + + @api.multi + @api.depends('date') + def _compute_name(self): + for item in self: + names = [ + item.date, + ] + item.name = ' / '.join(filter(None, names)) + + @api.multi + def set_data(self, adict): + self.ensure_one() + self.jsondata = json.dumps(adict) + + @api.multi + def get_data(self): + self.ensure_one() + return json.loads(self.jsondata or '{}') + + @api.multi + def debug_mode(self): + self.ensure_one() + return self.backend_id.debug_mode or \ + os.environ.get('IMPORTER_DEBUG_MODE') + + @job + def import_record(self, dest_model_name, importer_dotted_path=None, **kw): + """This job will import a record.""" + + with self.backend_id.get_environment(dest_model_name) as env: + importer = get_record_importer( + env, importer_dotted_path=importer_dotted_path) + return importer.run(self) + + @api.multi + def run_import(self): + """ queue a job for importing data stored in to self + """ + job_method = self.with_delay().import_record + if self.debug_mode(): + logger.warn('### DEBUG MODE ACTIVE: WILL NOT USE QUEUE ###') + job_method = self.import_record + for item in self: + # we create a record and a job for each model name + # that needs to be imported + for model, importer in item.recordset_id.available_models(): + job = job_method(model, importer_dotted_path=importer) + if job: + # link the job + item.write({'job_id': job.db_record().id}) + if self.debug_mode(): + # debug mode, no job here: reset it! + item.write({'job_id': False}) diff --git a/connector_importer/models/recordset.py b/connector_importer/models/recordset.py new file mode 100644 index 000000000..17d59d003 --- /dev/null +++ b/connector_importer/models/recordset.py @@ -0,0 +1,270 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + +import json +import os +from collections import OrderedDict + +from odoo import models, fields, api +from odoo.addons.connector.unit.synchronizer import Importer +from odoo.addons.queue_job.job import ( + DONE, STATES, job) + +from .job_mixin import JobRelatedMixin +from ..log import logger +from ..utils.misc import import_klass_from_dotted_path + + +def get_record_importer(env, importer_dotted_path=None): + if importer_dotted_path is None: + return env.get_connector_unit(Importer) + if not importer_dotted_path.startswith('odoo.addons.'): + importer_dotted_path = 'odoo.addons.' 
+ importer_dotted_path + return env.get_connector_unit( + import_klass_from_dotted_path(importer_dotted_path)) + + +class ImportRecordSet(models.Model, JobRelatedMixin): + _name = 'import.recordset' + _inherit = 'import.source.consumer.mixin' + _description = 'Import recordset' + _order = 'sequence ASC, create_date DESC' + _backend_type = 'import_backend' + + backend_id = fields.Many2one( + 'import.backend', + string='Import Backend' + ) + sequence = fields.Integer( + 'Sequence', + help="Sequence for the handle.", + default=10 + ) + import_type_id = fields.Many2one( + string='Import type', + comodel_name='import.type', + required=True, + ) + override_existing = fields.Boolean( + string='Override existing items', + default=True, + ) + name = fields.Char( + string='Name', + compute='_compute_name', + ) + create_date = fields.Datetime( + 'Create date', + ) + record_ids = fields.One2many( + 'import.record', + 'recordset_id', + string='Records', + ) + # store info about imports report + jsondata = fields.Text('JSON Data') + report_html = fields.Html( + 'Report summary', compute='_compute_report_html') + full_report_url = fields.Char( + 'Full report url', compute='_compute_full_report_url') + jobs_global_state = fields.Selection( + string='Jobs global state', + selection=STATES, + compute='_compute_jobs_global_state', + help=( + "Tells you if a job is running for this recordset. " + "If any of the sub jobs is not DONE or FAILED " + "we assume the global state is PENDING." + ), + readonly=True + ) + report_file = fields.Binary('Report file') + report_filename = fields.Char('Report filename') + docs_html = fields.Html( + 'Docs', compute='_compute_docs_html') + notes = fields.Html('Notes', help="Useful info for your users") + + @api.multi + def unlink(self): + # inheritance of non-model mixin - like JobRelatedMixin - + # does not work w/out this + return super(ImportRecordSet, self).unlink() + + @api.one + @api.depends('backend_id.name') + def _compute_name(self): + names = [ + self.backend_id.name, + '#' + str(self.id), + ] + self.name = ' '.join(filter(None, names)) + + @api.multi + def set_report(self, values, reset=False): + """ update import report values + """ + self.ensure_one() + if reset: + _values = {} + else: + _values = self.get_report() + _values.update(values) + self.jsondata = json.dumps(_values) + + @api.model + def get_report(self): + return json.loads(self.jsondata or '{}') + + @api.depends('jsondata') + def _compute_report_html(self): + template = self.env.ref('connector_importer.recordset_report') + for item in self: + if not item.jsondata: + continue + report = item.get_report() + data = { + 'recordset': item, + 'last_start': report.pop('_last_start'), + 'report_by_model': OrderedDict(), + } + # count keys by model + for _model, __ in item.available_models(): + model = self.env['ir.model'].search( + [('model', '=', _model)], limit=1) + data['report_by_model'][model] = {} + # be defensive here. At some point + # we could decide to skip models on demand. 
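+                 # only counters are rendered in the summary; the full
+                 # line-by-line report is served by the
+                 # `/importer/import-recordset/` controller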
+ for k, v in report.get(_model, {}).iteritems(): + data['report_by_model'][model][k] = len(v) + item.report_html = template.render(data) + + @api.multi + def _compute_full_report_url(self): + for item in self: + item.full_report_url = \ + '/importer/import-recordset/{}'.format(item.id) + + def debug_mode(self): + return self.backend_id.debug_mode or \ + os.environ.get('IMPORTER_DEBUG_MODE') + + @api.multi + @api.depends('job_id.state', 'record_ids.job_id.state') + def _compute_jobs_global_state(self): + for item in self: + item.jobs_global_state = item._get_global_state() + + @api.model + def _get_global_state(self): + if not self.job_id: + return DONE + res = DONE + for item in self.record_ids: + if not item.job_id: + # TODO: investigate how this is possible + continue + # TODO: check why `item.job_state` does not reflect the job state + if item.job_id.state != DONE: + res = item.job_id.state + break + return res + + def available_models(self): + return self.import_type_id.available_models() + + @api.multi + @job + def import_recordset(self): + """This job will import a recordset.""" + with self.backend_id.get_environment(self._name) as env: + importer = env.get_connector_unit(Importer) + return importer.run(self) + + @api.multi + def run_import(self): + """ queue a job for creating records (import.record items) + """ + job_method = self.with_delay().import_recordset + if self.debug_mode(): + logger.warn('### DEBUG MODE ACTIVE: WILL NOT USE QUEUE ###') + job_method = self.import_recordset + + for item in self: + job = job_method() + if job: + # link the job + item.write({'job_id': job.db_record().id}) + if self.debug_mode(): + # debug mode, no job here: reset it! + item.write({'job_id': False}) + if self.debug_mode(): + # TODO: port this + # the "after_all" job needs to be fired manually when in debug mode + # since the event handler in .events.chunk_finished_subscriber + # cannot estimate when all the chunks have been processed. + # for model, importer in self.import_type_id.available_models(): + # import_record_after_all( + # session, + # self.backend_id.id, + # model, + # ) + pass + + @api.multi + def generate_report(self): + self.ensure_one() + reporter = self.get_source().get_reporter() + if not reporter: + logger.debug('No reporter found...') + return + metadata, content = reporter.report_get(self) + self.write({ + 'report_file': content.encode('base64'), + 'report_filename': metadata['complete_filename'] + }) + logger.info(( + 'Report file updated on recordset={}. 
' + 'Filename: {}' + ).format(self.id, metadata['complete_filename'])) + + def _get_importers(self): + importers = OrderedDict() + + for _model, importer_dotted_path in self.available_models(): + model = self.env['ir.model'].search( + [('model', '=', _model)], limit=1) + with self.backend_id.get_environment(_model) as env: + importers[model] = get_record_importer( + env, importer_dotted_path=importer_dotted_path) + return importers + + @api.depends('import_type_id') + def _compute_docs_html(self): + template = self.env.ref('connector_importer.recordset_docs') + for item in self: + if isinstance(item.id, models.NewId): + continue + importers = item._get_importers() + data = { + 'recordset': item, + 'importers': importers, + } + item.docs_html = template.render(data) + + +# TODO +# @job +# def import_record_after_all( +# session, backend_id, model_name, last_record_id=None, **kw): +# """This job will import a record.""" +# # TODO: check this +# model = 'import.record' +# env = get_environment(session, model, backend_id) +# # recordset = None +# # if last_record_id: +# # record = env[model].browse(last_record_id) +# # recordset = record.recordset_id +# importer = get_record_importer(env) +# return importer.after_all() diff --git a/connector_importer/models/reporter.py b/connector_importer/models/reporter.py new file mode 100644 index 000000000..6e5e012de --- /dev/null +++ b/connector_importer/models/reporter.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). + + +from openerp import models +import csv +import io +import time + + +class ReporterMixin(models.AbstractModel): + _name = 'reporter.mixin' + + report_extension = '.txt' + + def report_get(self, recordset, **options): + fileout = io.BytesIO() + self.report_do(recordset, fileout, **options) + self.report_finalize(recordset, fileout, **options) + metadata = self.report_get_metadata(recordset, **options) + return metadata, fileout.getvalue() + + def report_do(self, recordset, fileout, **options): + raise NotImplementedError() + + def report_finalize(self, recordset, fileout, **options): + """Apply late updates to report.""" + + def report_get_metadata(self, recordset, **options): + fname = str(time.time()) + ext = self.report_extension + return { + 'filename': fname, + 'ext': ext, + 'complete_filename': fname + ext, + } + + +class CSVReporter(models.AbstractModel): + """Produce a CSV feed.""" + _name = 'reporter.csv' + _inherit = 'reporter.mixin' + + report_extension = '.csv' + report_keys = ['skipped', 'errored'] + # flag to determine if status report + # must be grouped by status. + # If `True` report result will be merged by status (errored, skippeed, ...) + report_group_by_status = True + + def report_get_writer(self, fileout, columns, + delimiter=';', quotechar='"'): + writer = csv.DictWriter( + fileout, columns, + delimiter=delimiter, + quoting=csv.QUOTE_NONNUMERIC, + quotechar=quotechar) + writer.writeheader() + return writer + + def report_add_line(self, writer, item): + writer.writerow(item) + + def report_get_columns(self, recordset, orig_content, + extra_keys=[], delimiter=';'): + """Retrieve columns by recordset. + + :param recordset: instance of recordset. + :param orig_content: original csv content list of line. + :param extra_keys: report-related extra columns. 
+ """ + # read only the 1st line of the original file + if orig_content: + line1 = orig_content[0].split(delimiter) + return line1 + extra_keys + return extra_keys + + def report_do(self, recordset, fileout, **options): + """Produce report.""" + json_report = recordset.get_report() + report_keys = options.get('report_keys', self.report_keys) + group_by_status = options.get( + 'group_by_status', self.report_group_by_status) + + model_keys = [ + x for x in json_report.iterkeys() if not x.startswith('_')] + + extra_keys = [ + self._report_make_key(x) for x in report_keys + ] + if not group_by_status: + # we produce one column per-model per-status + for model in model_keys: + for key in report_keys: + extra_keys.append(self._report_make_key(key, model=model)) + + source = recordset.get_source() + orig_content = source.csv_file.decode('base64').splitlines() + delimiter = source.csv_delimiter.encode('utf-8') + quotechar = source.csv_quotechar.encode('utf-8') + + columns = self.report_get_columns( + recordset, orig_content, + extra_keys=extra_keys, delimiter=delimiter) + + writer = self.report_get_writer( + fileout, columns, delimiter=delimiter, quotechar=quotechar) + + reader = csv.DictReader( + orig_content, delimiter=delimiter, quotechar=quotechar) + + self._report_do( + json_report=json_report, + reader=reader, + writer=writer, + model_keys=model_keys, + report_keys=report_keys, + group_by_status=group_by_status + ) + + def _report_do( + self, + json_report=None, + reader=None, + writer=None, + model_keys=None, + report_keys=None, + group_by_status=True): + + line_handler = self._report_line_by_model_and_status + if group_by_status: + line_handler = self._report_line_by_status + + grouped = self._report_group_by_line( + json_report, model_keys, report_keys) + + for line in reader: + line_handler(line, reader.line_num, grouped, model_keys) + self.report_add_line(writer, line) + + def _report_make_key(self, key, model=''): + if model: + return u'[R] {}: {}'.format(model, key) + return u'[R] {}'.format(key) + + def _report_group_by_line(self, json_report, model_keys, report_keys): + """Group report items by line number. 
+ + Return something like: + + { + 'errored': {}, + 'skipped': { + 2: [ + { + u'line_nr': 2, + u'message': u'MISSING REQUIRED KEY=foo', + u'model': u'product.supplierinfo', + u'odoo_record': None + }, + { + u'line_nr': 2, + u'message': u'MISSING REQUIRED KEY=bla', + u'model': u'product.product', + u'odoo_record': None + }, + ], + 3: [ + { + u'line_nr': 3, + u'message': u'MISSING REQUIRED KEY=foo', + u'model': u'product.template', + u'odoo_record': None + }, + { + u'line_nr': 3, + u'message': u'ALREADY_EXISTS code=XXXX', + u'model': u'product.product', + u'odoo_record': None + }, + ], + } + """ + by_line = {} + for model in model_keys: + # list of messages + by_model = json_report.get(model, {}) + for key in report_keys: + by_line.setdefault(key, {}) + for item in by_model.get(key, []): + by_line[key].setdefault( + item['line_nr'], [] + ).append(item) + return by_line + + def _report_line_by_model_and_status( + self, line, line_num, grouped, model_keys): + """Get one column per each pair model-status.""" + for model in model_keys: + for status, lines in grouped.iteritems(): + # get info on current line if any + line_info = lines.get(line_num, {}) + # add the extra report column anyway + line[self._report_make_key(model, status)] = \ + line_info.get('message') + + def _report_line_by_status( + self, line, line_num, grouped, model_keys): + """Get one column per each status containing all modelss messages.""" + for status, by_line in grouped.iteritems(): + line_info = by_line.get(line_num, []) + line[self._report_make_key(status)] = '\n'.join([ + u'{model}: {message}'.format(**item) for item in line_info + ]) diff --git a/connector_importer/models/source.py b/connector_importer/models/source.py new file mode 100644 index 000000000..29bd31f6c --- /dev/null +++ b/connector_importer/models/source.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Author: Simone Orsi +# Copyright 2017 Camptocamp SA +# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
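+ #
+ # A source is any model inheriting from `import.source`: it only has
+ # to implement `_get_lines()`, while chunking (`get_lines`) and the
+ # generic link to its consumer via `source_model`/`source_id` are
+ # handled here once for all source types.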
+ +from odoo import models, fields, api, tools + +from ..utils.importer_utils import gen_chunks, CSVReader, guess_csv_metadata + + +class ImportSourceConsumerdMixin(models.AbstractModel): + _name = 'import.source.consumer.mixin' + _description = 'Import source consumer' + + source_id = fields.Integer( + string='Source ID', + required=False, + ondelete='cascade', + ) + source_model = fields.Selection( + string='Source type', + selection='_selection_source_ref_id', + ) + source_ref_id = fields.Reference( + string='Source', + compute='_compute_source_ref_id', + selection='_selection_source_ref_id', + store=True, + ) + source_config_summary = fields.Html( + compute='_compute_source_config_summary', + readonly=True, + ) + + @api.multi + @api.depends('source_model', 'source_id') + def _compute_source_ref_id(self): + for item in self: + if not item.source_id or not item.source_model: + continue + item.source_ref_id = '{0.source_model},{0.source_id}'.format(item) + + @api.model + @tools.ormcache('self') + def _selection_source_ref_id(self): + domain = [('model', '=like', 'import.source.%')] + return [(r.model, r.name) + for r in self.env['ir.model'].search(domain) + if not r.model.endswith('mixin')] + + @api.multi + @api.depends('source_ref_id', ) + def _compute_source_config_summary(self): + for item in self: + if not item.source_ref_id: + continue + item.source_config_summary = item.source_ref_id.config_summary + + @api.multi + def open_source_config(self): + self.ensure_one() + action = self.env[self.source_model].get_formview_action() + action.update({ + 'views': [ + (self.env[self.source_model].get_config_view_id(), 'form'), + ], + 'res_id': self.source_id, + 'target': 'new', + }) + return action + + def get_source(self): + return self.source_ref_id + + +class ImportSource(models.AbstractModel): + _name = 'import.source' + _description = 'Import source' + _source_type = 'none' + _reporter_model = '' + + name = fields.Char( + compute=lambda self: self._source_type, + readony=True, + ) + chunk_size = fields.Integer( + required=True, + default=500, + string='Chunks Size' + ) + config_summary = fields.Html( + compute='_compute_config_summary', + readonly=True, + ) + + _config_summary_template = 'connector_importer.source_config_summary' + _config_summary_fields = ('chunk_size', ) + + @api.depends() + def _compute_config_summary(self): + template = self.env.ref(self._config_summary_template) + for item in self: + item.config_summary = template.render(item._config_summary_data()) + + def _config_summary_data(self): + info = [] + for fname in self._config_summary_fields: + info.append((fname, self[fname])) + return { + 'source': self, + 'summary_fields': self._config_summary_fields, + 'fields_info': self.fields_get(self._config_summary_fields), + } + + @api.model + def create(self, vals): + res = super(ImportSource, self).create(vals) + if self.env.context.get('active_model'): + # update reference on consumer + self.env[self.env.context['active_model']].browse( + self.env.context['active_id']).source_id = res.id + return res + + @api.multi + def get_lines(self): + self.ensure_one() + # retrieve lines + lines = self._get_lines() + + # sort them + lines_sorted = self._sort_lines(lines) + + for i, chunk in enumerate(gen_chunks(lines_sorted, + chunksize=self.chunk_size)): + # get out of chunk iterator + yield list(chunk) + + def _get_lines(self): + raise NotImplementedError() + + def _sort_lines(self, lines): + return lines + + def get_config_view_id(self): + return self.env['ir.ui.view'].search([ + 
('model', '=', self._name), + ('type', '=', 'form')], limit=1).id + + def get_reporter(self): + return self.env.get(self._reporter_model) + + +class CSVSource(models.Model): + _name = 'import.source.csv' + _inherit = 'import.source' + _description = 'CSV import source' + _source_type = 'csv' + _reporter_model = 'reporter.csv' + + csv_file = fields.Binary('CSV file') + # use these to load file from an FS path + csv_filename = fields.Char('CSV filename') + csv_filesize = fields.Char( + string='CSV filesize', + compute='_compute_csv_filesize', + readonly=True, + ) + csv_path = fields.Char('CSV path') + csv_delimiter = fields.Char( + string='CSV delimiter', + default=';', + ) + csv_quotechar = fields.Char( + string='CSV quotechar', + default='"', + ) + _config_summary_fields = ImportSource._config_summary_fields + ( + 'csv_filename', 'csv_filesize', 'csv_delimiter', 'csv_quotechar', + ) + + @api.onchange('csv_file') + def _onchance_csv_file(self): + if self.csv_file: + meta = guess_csv_metadata(self.csv_file.decode('base64')) + if meta: + self.csv_delimiter = meta['delimiter'] + self.csv_quotechar = meta['quotechar'] + + def _filesize_human(self, size, suffix='B'): + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: + if abs(size) < 1024.0: + return "%3.1f%s%s" % (size, unit, suffix) + size /= 1024.0 + return "%.1f%s%s" % (size, 'Y', suffix) + + @api.depends('csv_file') + def _compute_csv_filesize(self): + for item in self: + if item.csv_file: + item.csv_filesize = self._filesize_human( + len(item.csv_file.decode('base64'))) + + def _get_lines(self): + # read CSV + reader_args = { + 'delimiter': self.csv_delimiter, + } + if self.csv_path: + # TODO: join w/ filename + reader_args['filepath'] = self.csv_path + else: + reader_args['filedata'] = self.csv_file + + reader = CSVReader(**reader_args) + return reader.read_lines() + + # TODO: this stuff is now unrelated from backend version must be refactored + # # handy fields to make the example attachment + # # downloadable within recordset view + # example_file_xmlid = fields.Char() + # example_file_url = fields.Char( + # string='Download example file', + # compute='_compute_example_file_url', + # readonly=True, + # ) + # + # def _get_example_attachment(self): + # # You can define example file by creating attachments + # # with an xmlid matching the import type/key + # # `connector_importer.example_file_$version_key` + # if not self.backend_id.version or not self.import_type_id: + # return + # xmlid = self.example_file_xmlid + # if not xmlid: + # xmlid = u'connector_importer.examplefile_{}_{}'.format( + # self.backend_id.version.replace('.', '_'), + # self.import_type_id.key) + # return self.env.ref(xmlid, raise_if_not_found=0) + # + # @api.depends('backend_id.version', 'import_type_id', 'example_file_xmlid') + # def _compute_example_file_url(self): + # att = self._get_example_attachment() + # if att: + # self.example_file_url = u'/web/content/{}/{}'.format( + # att.id, att.name) diff --git a/connector_importer/security/ir.model.access.csv b/connector_importer/security/ir.model.access.csv new file mode 100644 index 000000000..e551c7a48 --- /dev/null +++ b/connector_importer/security/ir.model.access.csv @@ -0,0 +1,8 @@ +id,name,model_id:id,group_id:id,perm_read,perm_write,perm_create,perm_unlink +access_import_recordset,connector_importer.access_import_recordset,model_import_recordset,connector.group_connector_manager,1,1,1,1 +access_import_record,connector_importer.access_import_record,model_import_record,connector.group_connector_manager,1,1,1,1 
+access_import_type,connector_importer.access_import_type,model_import_type,connector.group_connector_manager,1,1,1,1
+access_import_backend_user,connector_importer.access_import_backend_user,model_import_backend,connector_importer.connector_importer_user,1,0,0,0
+access_import_recordset_user,connector_importer.access_import_recordset_user,model_import_recordset,connector_importer.connector_importer_user,1,0,0,0
+access_import_type_user,connector_importer.access_import_type_user,model_import_type,connector_importer.connector_importer_user,1,0,0,0
+access_connector_queue_job_user,connector job user,connector.model_queue_job,connector_importer.connector_importer_user,1,0,0,0
diff --git a/connector_importer/security/security.xml b/connector_importer/security/security.xml
new file mode 100644
index 000000000..03e48fcdb
--- /dev/null
+++ b/connector_importer/security/security.xml
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="utf-8"?>
+<odoo>
+
+    <record id="connector_importer_user" model="res.groups">
+        <field name="name">Connector product importer user</field>
+    </record>
+
+</odoo>
diff --git a/connector_importer/tests/test_all.py b/connector_importer/tests/test_all.py
new file mode 100644
index 000000000..13cfd9ced
--- /dev/null
+++ b/connector_importer/tests/test_all.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
diff --git a/connector_importer/tests/test_backend.py b/connector_importer/tests/test_backend.py
new file mode 100644
index 000000000..f72990f6b
--- /dev/null
+++ b/connector_importer/tests/test_backend.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+
+import odoo.tests.common as common
+
+
+class TestBackend(common.TransactionCase):
+
+    def setUp(self):
+        super(TestBackend, self).setUp()
+        self.backend_model = self.env['import.backend']
+
+    def test_backend_create(self):
+        b1 = self.backend_model.create({})
+        self.assertTrue(b1)
diff --git a/connector_importer/utils/__init__.py b/connector_importer/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/connector_importer/utils/importer_utils.py b/connector_importer/utils/importer_utils.py
new file mode 100644
index 000000000..75c19f2ec
--- /dev/null
+++ b/connector_importer/utils/importer_utils.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import csv
+import base64
+import time
+from chardet.universaldetector import UniversalDetector
+
+from cStringIO import StringIO
+
+from ..log import logger
+
+
+def get_encoding(data):
+    """Try to detect the file encoding incrementally.
+
+    See http://chardet.readthedocs.org/en/latest/usage.html#example-detecting-encoding-incrementally
+    """
+    start = time.time()
+    msg = 'detecting file encoding...'
+    logger.info(msg)
+    file_like = StringIO(data)
+    detector = UniversalDetector()
+    for i, line in enumerate(file_like):
+        detector.feed(line)
+        if detector.done:
+            break
+    detector.close()
+    msg = 'encoding found in %s sec ' % str(time.time() - start)
+    msg += str(detector.result)
+    logger.info(msg)
+    return detector.result
+
+
+def csv_content_to_file(data):
+    """Return a file-like object with `data` re-encoded to UTF-8.
+
+    NOTE: Odoo binary fields spit out base64 data,
+    so decode them before calling this.
+    """
+    # guess encoding via chardet (LOVE IT! :))
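+    # NOTE (assumption on chardet's documented behavior): the result is a
+    # dict like {'encoding': 'ISO-8859-2', 'confidence': 0.8}; `encoding`
+    # can be None when detection fails, the fallback below handles that.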
+    encoding_info = get_encoding(data)
+    encoding = encoding_info['encoding']
+    if encoding != 'utf-8':
+        try:
+            data_str = data.decode(encoding)
+        except (UnicodeDecodeError, TypeError):
+            # dirty fallback in case
+            # we don't spot the right encoding above
+            for enc in ('utf-16le', 'latin-1', 'ascii', ):
+                try:
+                    data_str = data.decode(enc)
+                    break
+                except UnicodeDecodeError:
+                    data_str = data
+        data_str = data_str.encode('utf-8')
+    else:
+        data_str = data
+    return StringIO(data_str)
+
+
+def guess_csv_metadata(filecontent):
+    f = StringIO(filecontent)
+    try:
+        dialect = csv.Sniffer().sniff(f.readline(), "\t,;")
+        f.seek(0)
+        meta = {
+            'delimiter': dialect.delimiter,
+            'quotechar': dialect.quotechar,
+        }
+    except csv.Error:
+        meta = {}
+    return meta
+
+
+def read_path(path):
+    with open(path, 'r') as thefile:
+        return thefile.read()
+
+
+class CSVReader(object):
+
+    def __init__(self,
+                 filepath=None,
+                 filedata=None,
+                 delimiter='|',
+                 quotechar='"',
+                 fieldnames=None):
+        assert filedata or filepath, 'Provide a file path or some file data!'
+        if filepath:
+            filedata = read_path(filepath)
+        else:
+            filedata = base64.decodestring(filedata)
+        # remove NULL byte
+        filedata = filedata.replace('\x00', '')
+        self.data = csv_content_to_file(filedata)
+        self.delimiter = delimiter
+        self.quotechar = quotechar
+        self.fieldnames = fieldnames
+
+    def read_lines(self):
+        """Return an iterator that yields CSV lines
+        enriched with extra info (like the line number).
+        """
+        self.data.seek(0)
+        reader = csv.DictReader(
+            self.data,
+            delimiter=str(self.delimiter),
+            quotechar=str(self.quotechar),
+            fieldnames=self.fieldnames,
+        )
+        for line in reader:
+            line['_line_nr'] = reader.line_num
+            yield line
+
+
+def gen_chunks(iterable, chunksize=10):
+    """Chunk generator.
+
+    Take an iterable and yield `chunksize` sized slices.
+    """
+    chunk = []
+    for i, line in enumerate(iterable):
+        if i % chunksize == 0 and i > 0:
+            yield chunk
+            # start a fresh list: clearing the old one in place
+            # would mutate the chunk we just yielded
+            chunk = []
+        chunk.append(line)
+    yield chunk
diff --git a/connector_importer/utils/mapper_utils.py b/connector_importer/utils/mapper_utils.py
new file mode 100644
index 000000000..86d49169d
--- /dev/null
+++ b/connector_importer/utils/mapper_utils.py
@@ -0,0 +1,291 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
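+"""Converters and modifiers for connector mappers' ``direct`` mappings.
+
+A minimal usage sketch (column names are illustrative, not from real data)::
+
+    direct = [
+        (convert('price', 'safe_float'), 'list_price'),
+        (from_mapping('lang', {'IT': 'it_IT'}, default_value='en_US'),
+         'lang'),
+    ]
+"""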
+
+import pytz
+from datetime import datetime
+
+from odoo import fields
+
+from ..log import logger
+
+FMTS = (
+    '%d/%m/%Y',
+)
+
+FMTS_DT = (
+    '%Y-%m-%d %H:%M:%S',
+    '%Y-%m-%d %H:%M:%S.000',
+)
+
+
+def to_date(value, formats=FMTS):
+    """Convert date strings to odoo format."""
+
+    for fmt in formats:
+        try:
+            value = datetime.strptime(value, fmt).date()
+            break
+        except ValueError:
+            pass
+    if not isinstance(value, basestring):
+        try:
+            return fields.Date.to_string(value)
+        except ValueError:
+            pass
+    # the value has not been converted,
+    # maybe because it is something like 00/00/0000
+    # or in another bad format
+    return None
+
+
+def to_utc_datetime(orig_value, tz='Europe/Rome'):
+    """Convert date strings to odoo format respecting TZ."""
+    value = orig_value
+    local_tz = pytz.timezone(tz)
+    for fmt in FMTS_DT:
+        try:
+            naive = datetime.strptime(orig_value, fmt)
+            local_dt = local_tz.localize(naive, is_dst=None)
+            value = local_dt.astimezone(pytz.utc)
+            break
+        except ValueError:
+            pass
+    if not isinstance(value, basestring):
+        return fields.Datetime.to_string(value)
+    # the value has not been converted,
+    # maybe because it is something like 00/00/0000
+    # or in another bad format
+    return None
+
+
+def to_safe_float(value):
+    """Safely convert to float."""
+    if isinstance(value, float):
+        return value
+    if not value:
+        return 0.0
+    try:
+        return float(value.replace(',', '.'))
+    except ValueError:
+        return 0.0
+
+
+def to_safe_int(value):
+    """Safely convert to integer."""
+    if isinstance(value, int):
+        return value
+    if not value:
+        return 0
+    try:
+        return int(value.replace(',', '').replace('.', ''))
+    except ValueError:
+        return 0
+
+
+CONV_MAPPING = {
+    'date': to_date,
+    'utc_date': to_utc_datetime,
+    'safe_float': to_safe_float,
+    'safe_int': to_safe_int,
+}
+
+
+def convert(field, conv_type,
+            fallback_field=None,
+            pre_value_handler=None,
+            **kw):
+    """Convert the source field to a defined ``conv_type``
+    (e.g. ``str``) before returning it.
+
+    You can also use predefined converters like 'date'.
+    Use ``fallback_field`` to provide a field of the same type
+    to be used in case the base field has no value.
+    """
+    if conv_type in CONV_MAPPING:
+        conv_type = CONV_MAPPING[conv_type]
+
+    def modifier(self, record, to_attr):
+        if field not in record:
+            # be gentle
+            logger.warning(
+                'Field `%s` missing in line `%s`', field, record['_line_nr'])
+            return None
+        value = record.get(field)
+        if not value and fallback_field:
+            value = record[fallback_field]
+        if pre_value_handler:
+            value = pre_value_handler(value)
+        # do not use `if not value` otherwise you override all zero values
+        if value is None:
+            return None
+        return conv_type(value, **kw)
+
+    return modifier
+
+
+def from_mapping(field, mapping, default_value=None):
+    """Convert the source value using a ``mapping`` of values."""
+
+    def modifier(self, record, to_attr):
+        value = record.get(field)
+        return mapping.get(value, default_value)
+
+    return modifier
+
+
+def concat(field, separator=' ', handler=None):
+    """Concatenate values from different fields."""
+
+    # TODO: `field` is actually a list of fields.
+    # The `field` attribute is required ATM by the base connector mapper
+    # and `_direct_source_field_name` raises an error if you don't specify it.
+    # Check if we can get rid of it.
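+    # Example (hypothetical source columns)::
+    #
+    #     direct = [(concat(['firstname', 'lastname']), 'name')]
+    #
+    # maps a line like {'firstname': 'John', 'lastname': 'Doe'}
+    # to the single value 'John Doe'.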
+
+    def modifier(self, record, to_attr):
+        value = [
+            record.get(_field, '')
+            for _field in field if record.get(_field, '').strip()
+        ]
+        return separator.join(value)
+
+    return modifier
+
+# TODO: consider moving this to the mapper base klass
+# to ease maintainability and overriding
+
+
+def backend_to_rel(field,
+                   search_field=None,
+                   search_operator=None,
+                   value_handler=None,
+                   default_search_value=None,
+                   default_search_field=None,
+                   search_value_handler=None,
+                   allowed_length=None,
+                   create_missing=False,
+                   create_missing_handler=None,):
+    """A modifier intended to be used on the ``direct`` mappings.
+
+    Example::
+
+        direct = [(backend_to_rel('country',
+                                  search_field='code',
+                                  default_search_value='IT',
+                                  allowed_length=2), 'country_id'),]
+
+    :param field: name of the source field in the record
+    :param search_field: name of the field to be used for searching
+    :param search_operator: operator to be used for searching
+    :param value_handler: a function to manipulate the raw value
+        before using it. You can use it to weed out bogus "empty"
+        values that are not falsy, like '0' instead of an empty string.
+    :param default_search_value: if the value is empty you can provide
+        a default value to look up
+    :param default_search_field: if the value is empty you can provide
+        a different field to look up for the default value
+    :param search_value_handler: a callable used
+        to manipulate the value before searching
+    :param allowed_length: enforce a check on the search_value length
+    :param create_missing: create a new record if not found
+    :param create_missing_handler: provide a handler
+        for getting new values for a new record to be created.
+    """
+
+    def modifier(self, record, to_attr):
+        search_value = record.get(field)
+
+        if search_value and value_handler:
+            search_value = value_handler(self, record, search_value)
+
+        # handle defaults if no search value here
+        if not search_value and default_search_value:
+            search_value = default_search_value
+            if default_search_field:
+                modifier.search_field = default_search_field
+
+        # get the real column and the model
+        column = self.model._fields[to_attr]
+        rel_model = \
+            self.env[column.comodel_name].with_context(active_test=False)
+
+        # guard against `len(None)` when there is no value at all
+        if allowed_length and search_value and \
+                len(search_value) != allowed_length:
+            return None
+
+        # alter search value if handler is given
+        if search_value and search_value_handler:
+            search_value = search_value_handler(search_value)
+
+        if not search_value:
+            return None
+
+        search_operator = '='
+        if column.type.endswith('2many'):
+            # we need multiple values
+            search_operator = 'in'
+            if not isinstance(search_value, (list, tuple)):
+                search_value = [search_value]
+
+        if modifier.search_operator:
+            # override by param
+            search_operator = modifier.search_operator
+
+        # finally search it
+        search_args = [(modifier.search_field,
+                        search_operator,
+                        search_value)]
+
+        value = rel_model.search(search_args)
+
+        if (column.type.endswith('2many') and
+                isinstance(search_value, (list, tuple)) and
+                not len(search_value) == len(value or [])):
+            # make sure we consider all the values and related records
+            # that we pass here.
+            # If one of them is missing we have to create them all before.
+            # If `create_missing_handler` is given, it must make sure
+            # to create all the missing records and return existing ones too.
+            # Typical use case: product categories.
+            # If we pass ['Categ1', 'Categ2', 'Categ3'] we want them all,
+            # and if any of them is missing we might want to create them
+            # using a `create_missing_handler`.
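+            # E.g. (illustrative data): search_value == ['Categ1', 'Categ2']
+            # but only 'Categ1' exists: len(value) == 1 != 2, so we reset
+            # `value` and let the `create_missing` branch below handle it.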
+            value = None
+
+        # create if missing
+        if not value and create_missing:
+            try:
+                if create_missing_handler:
+                    value = create_missing_handler(self, rel_model, record)
+                else:
+                    value = rel_model.create({'name': record[field]})
+            except Exception as e:
+                msg = (
+                    '`backend_to_rel` failed creation. '
+                    '[model: %s] [line: %s] [to_attr: %s] '
+                    'Error: %s'
+                )
+                logger.error(
+                    msg, rel_model._name, record['_line_nr'], to_attr, str(e)
+                )
+                return None
+
+        # handle the final value based on col type
+        if value:
+            if column.type == 'many2one':
+                value = value[0].id
+            elif column.type in ('one2many', 'many2many'):
+                value = [(6, 0, [x.id for x in value])]
+        else:
+            return None
+
+        return value
+
+    # use method attributes to not mess up the variables' scope.
+    # If we change the var inside modifier, without this trick
+    # you get UnboundLocalError, as the variable was never defined.
+    # Trick thanks to http://stackoverflow.com/a/27910553/647924
+    modifier.search_field = search_field or 'name'
+    modifier.search_operator = search_operator or None
+
+    return modifier
diff --git a/connector_importer/utils/misc.py b/connector_importer/utils/misc.py
new file mode 100644
index 000000000..642a48daa
--- /dev/null
+++ b/connector_importer/utils/misc.py
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
+
+import importlib
+
+# http://chase-seibert.github.io/blog/2014/04/23/python-imp-examples.html
+
+
+def import_klass_from_dotted_path(dotted_path, path=None):
+    """Load a klass via dotted path."""
+
+    module, klass_name = dotted_path.rsplit('.', 1)
+    return getattr(importlib.import_module(module), klass_name)
diff --git a/connector_importer/utils/report_html.py b/connector_importer/utils/report_html.py
new file mode 100644
index 000000000..6ddcf8b2c
--- /dev/null
+++ b/connector_importer/utils/report_html.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+# Author: Simone Orsi
+# Copyright 2017 Camptocamp SA
+# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
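+"""Render a recordset's JSON import report as plain HTML.
+
+Minimal usage sketch (see ``EXAMPLEDATA`` below for the expected payload)::
+
+    html = Reporter(JSONDATA, detailed=True).html()
+"""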
+
+import json
+
+EXAMPLEDATA = {
+    "last_summary": {
+        "updated": 0, "skipped": 584, "errors": 0, "created": 414
+    },
+    "errors": [],
+    "last_start": "08/03/2017 13:46",
+    "skipped": [
+        {"model": "product.template",
+         "line": 3,
+         "message": "ALREADY EXISTS code: 8482",
+         "odoo_record": 6171},
+        {"model": "product.template",
+         "line": 4,
+         "message": "ALREADY EXISTS code: 8482",
+         "odoo_record": 6171},
+        {"model": "product.template",
+         "line": 5,
+         "message": "ALREADY EXISTS code: 8482",
+         "odoo_record": 6171},
+    ],
+}
+JSONDATA = json.dumps(EXAMPLEDATA)
+
+
+def link_record(record_id, model='', record=None,
+                name_field='name', target='_new'):
+    """Link an existing odoo record."""
+    name = 'View'
+    if record:
+        default = getattr(record, '_rec_name', 'Unknown')
+        name = getattr(record, name_field, default)
+        model = record._name
+    link = (
+        '<a href="/web#id={id}&model={model}&view_type=form" '
+        'target="{target}">{name}</a>'
+    ).format(
+        id=record_id,
+        model=model,
+        name=name,
+        target=target,
+    )
+    return link
+
+
+class Reporter(object):
+    """Produce a formatted HTML report from importer json data."""
+
+    def __init__(self, jsondata, detailed=False, full_url=''):
+        self._jsondata = jsondata
+        self._data = json.loads(self._jsondata)
+        self._html = []
+        self._detailed = detailed
+        self._full_url = full_url
+
+    def html(self, wrapped=True):
+        """Return HTML report."""
+        self._produce()
+        content = ''.join(self._html)
+        if wrapped:
+            return self._wrap(
+                'html', self._wrap('body', content)
+            )
+        return content
+
+    def _add(self, el):
+        self._html.append(el)
+
+    def _wrap(self, tag, content):
+        return '<{tag}>{content}</{tag}>'.format(tag=tag, content=content)
+
+    def _line(self, content):
+        return self._wrap('p', content)
+
+    def _value(self, key, value):
+        return self._wrap('strong', key.capitalize() + ': ') + str(value)
+
+    def _value_line(self, key, value):
+        return self._line(
+            self._value(key, value)
+        )
+
+    def _line_to_msg(self, line):
+        res = []
+        if line.get('line'):
+            res.append('CSV line: {},'.format(line['line']))
+        if line.get('message'):
+            res.append(line['message'])
+        if 'odoo_record' in line and 'model' in line:
+            res.append(
+                link_record(line['odoo_record'], model=line['model'])
+            )
+        return ' '.join(res)
+
+    def _listing(self, lines, list_type='ol'):
+        _lines = []
+        for line in lines:
+            _lines.append(self._wrap('li', self._line_to_msg(line)))
+        return self._wrap(
+            list_type, ''.join(_lines)
+        )
+
+    def _produce(self):
+        if not self._data.get('last_summary'):
+            return
+        # header
+        self._add(self._wrap('h2', 'Last summary'))
+        # start date
+        self._add(self._value_line('Last start', self._data['last_start']))
+        # global counters
+        summary_items = self._data['last_summary'].items()
+        for key, value in summary_items:
+            last = key == summary_items[-1][0]
+            self._add(self._value(key, value) + (' - ' if not last else ''))
+        if self._detailed:
+            self._add(self._wrap('h3', 'Details'))
+            if self._data['skipped']:
+                self._add(self._wrap('h4', 'Skipped'))
+                # skip messages
+                self._add(self._listing(self._data['skipped']))
+            if self._data['errors']:
+                self._add(self._wrap('h4', 'Errors'))
+                # error messages
+                self._add(self._listing(self._data['errors']))
+        if self._full_url:
+            link = (
+                '<a href="{}" target="_new">View full report</a>'
+            ).format(self._full_url)
+            self._add(self._line(link))
+
+
+if __name__ == '__main__':
+    reporter = Reporter(JSONDATA, detailed=1)
+    print reporter.html()
diff --git a/connector_importer/views/docs_template.xml b/connector_importer/views/docs_template.xml
new file mode 100644
index 000000000..1ad684c0e
--- /dev/null
+++ b/connector_importer/views/docs_template.xml
@@ -0,0 +1,109 @@
+<!-- QWeb template for the importer docs
+     (markup not recoverable from this copy of the patch) -->
diff --git a/connector_importer/views/import_backend_views.xml b/connector_importer/views/import_backend_views.xml
new file mode 100644
index 000000000..324589a29
--- /dev/null
+++ b/connector_importer/views/import_backend_views.xml
@@ -0,0 +1,102 @@
+<!-- import.backend views: form (with its "Import" action), tree and action
+     records (remaining markup not recoverable from this copy of the patch) -->