From cab5aa897941d2bd2268eded871b8de7691ef310 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Fri, 31 May 2019 19:19:25 -0400 Subject: [PATCH 01/14] rcv.parse_csv: add `parse_rcv_compra_X_csv_file` For parsing all the existing alternatives of an RCV CSV file for "Compra" (purchase), add these functions and their corresponding schemas: - `parse_rcv_compra_registro_csv_file`: "Registro" - `parse_rcv_compra_no_incluir_csv_file`: "No incluir" - `parse_rcv_compra_reclamado_csv_file`: "Reclamado" - `parse_rcv_compra_pendiente_csv_file`: "Pendiente" The implementations are very similar to `parse_rcv_venta_csv_file` (added in PR #52, commit c4fbfc13) and its interface is very different to the old `process_rcv_csv_file` (among other things, instead of calling a given handler on each row it yields the row), which will be removed. Tests have not been implemented. --- cl_sii/rcv/parse_csv.py | 600 ++++++++++++++++++++++++++++++++++++ tests/test_rcv_parse_csv.py | 48 ++- 2 files changed, 647 insertions(+), 1 deletion(-) diff --git a/cl_sii/rcv/parse_csv.py b/cl_sii/rcv/parse_csv.py index 392c4e7a..12abf4c7 100644 --- a/cl_sii/rcv/parse_csv.py +++ b/cl_sii/rcv/parse_csv.py @@ -139,6 +139,303 @@ def parse_rcv_venta_csv_file( ) +def parse_rcv_compra_registro_csv_file( + receptor_rut: Rut, + receptor_razon_social: str, + input_file_path: str, + n_rows_offset: int = 0, + max_n_rows: int = None, +) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: + """ + Parse DTE data objects from a RCV "Compra/Registro" file (CSV). 
+ + """ + schema_context = dict( + receptor_rut=receptor_rut, + receptor_razon_social=receptor_razon_social, + ) + input_csv_row_schema = RcvCompraRegistroCsvRowSchema(context=schema_context) + + expected_input_field_names = ( + 'Nro', + 'Tipo Doc', # 'tipo_dte' + 'Tipo Compra', + 'RUT Proveedor', # 'emisor_rut' + 'Razon Social', # 'emisor_razon_social' + 'Folio', # 'folio' + 'Fecha Docto', # 'fecha_emision_date' + 'Fecha Recepcion', # 'fecha_recepcion_dt' + 'Fecha Acuse', # 'fecha_acuse_dt' + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Total', # 'monto_total' + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'Tabacos Puros', + 'Tabacos Cigarrillos', + 'Tabacos Elaborados', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + fields_to_remove_names = ( + 'Nro', + 'Tipo Compra', + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'Tabacos Puros', + 'Tabacos Cigarrillos', + 'Tabacos Elaborados', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + yield from _parse_rcv_csv_file( + input_csv_row_schema, + expected_input_field_names, + fields_to_remove_names, + input_file_path, + n_rows_offset, + max_n_rows, + ) + + +def parse_rcv_compra_no_incluir_csv_file( + receptor_rut: Rut, + receptor_razon_social: str, + input_file_path: str, + n_rows_offset: int = 0, + max_n_rows: int = None, +) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: + """ + Parse DTE data objects from a RCV "Compra/no incluir" file (CSV). 
+ + """ + schema_context = dict( + receptor_rut=receptor_rut, + receptor_razon_social=receptor_razon_social, + ) + input_csv_row_schema = RcvCompraNoIncluirCsvRowSchema(context=schema_context) + + expected_input_field_names = ( + 'Nro', + 'Tipo Doc', # 'tipo_dte' + 'Tipo Compra', + 'RUT Proveedor', # 'emisor_rut' + 'Razon Social', # 'emisor_razon_social' + 'Folio', # 'folio' + 'Fecha Docto', # 'fecha_emision_date' + 'Fecha Recepcion', # 'fecha_recepcion_dt' + 'Fecha Acuse', # 'fecha_acuse_dt' + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Total', # 'monto_total' + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + fields_to_remove_names = ( + 'Nro', + 'Tipo Compra', + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + yield from _parse_rcv_csv_file( + input_csv_row_schema, + expected_input_field_names, + fields_to_remove_names, + input_file_path, + n_rows_offset, + max_n_rows, + ) + + +def parse_rcv_compra_reclamado_csv_file( + receptor_rut: Rut, + receptor_razon_social: str, + input_file_path: str, + n_rows_offset: int = 0, + max_n_rows: int = None, +) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: + """ + Parse DTE data objects from a RCV "Compra/reclamado" file (CSV). 
+ + """ + schema_context = dict( + receptor_rut=receptor_rut, + receptor_razon_social=receptor_razon_social, + ) + input_csv_row_schema = RcvCompraReclamadoCsvRowSchema(context=schema_context) + + expected_input_field_names = ( + 'Nro', + 'Tipo Doc', # 'tipo_dte' + 'Tipo Compra', + 'RUT Proveedor', # 'emisor_rut' + 'Razon Social', # 'emisor_razon_social' + 'Folio', # 'folio' + 'Fecha Docto', # 'fecha_emision_date' + 'Fecha Recepcion', # 'fecha_recepcion_dt' + 'Fecha Reclamo', # 'fecha_reclamo_dt' + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Total', # 'monto_total' + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + fields_to_remove_names = ( + 'Nro', + 'Tipo Compra', + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + yield from _parse_rcv_csv_file( + input_csv_row_schema, + expected_input_field_names, + fields_to_remove_names, + input_file_path, + n_rows_offset, + max_n_rows, + ) + + +def parse_rcv_compra_pendiente_csv_file( + receptor_rut: Rut, + receptor_razon_social: str, + input_file_path: str, + n_rows_offset: int = 0, + max_n_rows: int = None, +) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: + """ + Parse DTE data objects from a RCV "Compra/pendiente" file (CSV). 
+ + """ + schema_context = dict( + receptor_rut=receptor_rut, + receptor_razon_social=receptor_razon_social, + ) + input_csv_row_schema = RcvCompraPendienteCsvRowSchema(context=schema_context) + + expected_input_field_names = ( + 'Nro', + 'Tipo Doc', # 'tipo_dte' + 'Tipo Compra', + 'RUT Proveedor', # 'emisor_rut' + 'Razon Social', # 'emisor_razon_social' + 'Folio', # 'folio' + 'Fecha Docto', # 'fecha_emision_date' + 'Fecha Recepcion', # 'fecha_recepcion_dt' + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Total', # 'monto_total' + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + fields_to_remove_names = ( + 'Nro', + 'Tipo Compra', + 'Monto Exento', + 'Monto Neto', + 'Monto IVA Recuperable', + 'Monto Iva No Recuperable', + 'Codigo IVA No Rec.', + 'Monto Neto Activo Fijo', + 'IVA Activo Fijo', + 'IVA uso Comun', + 'Impto. Sin Derecho a Credito', + 'IVA No Retenido', + 'NCE o NDE sobre Fact. 
de Compra', + 'Codigo Otro Impuesto', + 'Valor Otro Impuesto', + 'Tasa Otro Impuesto', + ) + + yield from _parse_rcv_csv_file( + input_csv_row_schema, + expected_input_field_names, + fields_to_remove_names, + input_file_path, + n_rows_offset, + max_n_rows, + ) + + ############################################################################### # schemas ############################################################################### @@ -307,6 +604,309 @@ def postprocess(self, data: dict) -> dict: return data +class RcvCompraRegistroCsvRowSchema(_RcvCsvRowSchemaBase): + + FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_ACUSE_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + + class Meta: + strict = True + + ########################################################################### + # basic fields + ########################################################################### + + emisor_rut = mm_fields.RutField( + required=True, + load_from='RUT Proveedor', + ) + tipo_dte = mm_fields.TipoDteField( + required=True, + load_from='Tipo Doc', + ) + folio = marshmallow.fields.Integer( + required=True, + load_from='Folio', + ) + fecha_emision_date = mm_utils.CustomMarshmallowDateField( + format='%d/%m/%Y', # e.g. 
'22/10/2018' + required=True, + load_from='Fecha Docto', + ) + monto_total = marshmallow.fields.Integer( + required=True, + load_from='Monto Total', + ) + emisor_razon_social = marshmallow.fields.String( + required=True, + load_from='Razon Social', + ) + + ########################################################################### + # fields whose value is set using data passed in the schema context + ########################################################################### + + receptor_rut = mm_fields.RutField( + required=True, + ) + receptor_razon_social = marshmallow.fields.String( + required=True, + ) + + ########################################################################### + # extra fields: not included in the returned struct + ########################################################################### + + fecha_recepcion_dt = marshmallow.fields.DateTime( + format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' + required=True, + load_from='Fecha Recepcion', + ) + fecha_acuse_dt = marshmallow.fields.DateTime( + format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' + required=True, + allow_none=True, + load_from='Fecha Acuse', + ) + + @marshmallow.pre_load + def preprocess(self, in_data: dict) -> dict: + # note: required fields checks are run later on automatically thus we may not assume that + # values of required fields (`required=True`) exist. + + # Set field value only if it was not in the input data. + in_data.setdefault('receptor_rut', self.context['receptor_rut']) + in_data.setdefault('receptor_razon_social', self.context['receptor_razon_social']) + + # Fix missing/default values. 
+ if 'Fecha Acuse' in in_data: + if in_data['Fecha Acuse'] == '': + in_data['Fecha Acuse'] = None + + return in_data + + @marshmallow.post_load + def postprocess(self, data: dict) -> dict: + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13' + data['fecha_recepcion_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_recepcion_dt'], tz=self.FIELD_FECHA_RECEPCION_DT_TZ) + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13-03:00' + # >>> data['fecha_recepcion_dt'].astimezone(pytz.UTC).isoformat() + # '2018-10-23T04:54:13+00:00' + + if data['fecha_acuse_dt']: + data['fecha_acuse_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_acuse_dt'], tz=self.FIELD_FECHA_ACUSE_DT_TZ) + + # note: to express this value in another timezone (but the value does not change), do + # `dt_obj.astimezone(pytz.timezone('some timezone'))` + + return data + + +RcvCompraNoIncluirCsvRowSchema = RcvCompraRegistroCsvRowSchema + + +class RcvCompraReclamadoCsvRowSchema(_RcvCsvRowSchemaBase): + + FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_RECLAMO_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + + class Meta: + strict = True + + ########################################################################### + # basic fields + ########################################################################### + + emisor_rut = mm_fields.RutField( + required=True, + load_from='RUT Proveedor', + ) + tipo_dte = mm_fields.TipoDteField( + required=True, + load_from='Tipo Doc', + ) + folio = marshmallow.fields.Integer( + required=True, + load_from='Folio', + ) + fecha_emision_date = mm_utils.CustomMarshmallowDateField( + format='%d/%m/%Y', # e.g. 
'22/10/2018' + required=True, + load_from='Fecha Docto', + ) + monto_total = marshmallow.fields.Integer( + required=True, + load_from='Monto Total', + ) + emisor_razon_social = marshmallow.fields.String( + required=True, + load_from='Razon Social', + ) + + ########################################################################### + # fields whose value is set using data passed in the schema context + ########################################################################### + + receptor_rut = mm_fields.RutField( + required=True, + ) + receptor_razon_social = marshmallow.fields.String( + required=True, + ) + + ########################################################################### + # extra fields: not included in the returned struct + ########################################################################### + + fecha_recepcion_dt = marshmallow.fields.DateTime( + format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' + required=True, + load_from='Fecha Recepcion', + ) + fecha_reclamo_dt = marshmallow.fields.DateTime( + # note: for some reason the DTEs with `tipo_dte=` + # (and maybe others as well) do not have this field set (always? we do not know). + format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' + required=False, + allow_none=True, + load_from='Fecha Reclamo', + ) + + @marshmallow.pre_load + def preprocess(self, in_data: dict) -> dict: + # note: required fields checks are run later on automatically thus we may not assume that + # values of required fields (`required=True`) exist. + + # Set field value only if it was not in the input data. + in_data.setdefault('receptor_rut', self.context['receptor_rut']) + in_data.setdefault('receptor_razon_social', self.context['receptor_razon_social']) + + # Fix missing/default values. + # note: for some reason the DTEs with `tipo_dte=` + # (and maybe others as well) do not have this field set (always? we do not know). 
+ if 'Fecha Reclamo' in in_data: + if in_data['Fecha Reclamo'] == '' or 'null' in in_data['Fecha Reclamo']: + in_data['Fecha Reclamo'] = None + + return in_data + + @marshmallow.post_load + def postprocess(self, data: dict) -> dict: + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13' + data['fecha_recepcion_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_recepcion_dt'], tz=self.FIELD_FECHA_RECEPCION_DT_TZ) + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13-03:00' + # >>> data['fecha_recepcion_dt'].astimezone(pytz.UTC).isoformat() + # '2018-10-23T04:54:13+00:00' + + if data['fecha_reclamo_dt']: + data['fecha_reclamo_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_reclamo_dt'], tz=self.FIELD_FECHA_RECLAMO_DT_TZ) + + # note: to express this value in another timezone (but the value does not change), do + # `dt_obj.astimezone(pytz.timezone('some timezone'))` + + return data + + +class RcvCompraPendienteCsvRowSchema(_RcvCsvRowSchemaBase): + + FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_ACUSE_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + + class Meta: + strict = True + + ########################################################################### + # basic fields + ########################################################################### + + emisor_rut = mm_fields.RutField( + required=True, + load_from='RUT Proveedor', + ) + tipo_dte = mm_fields.TipoDteField( + required=True, + load_from='Tipo Doc', + ) + folio = marshmallow.fields.Integer( + required=True, + load_from='Folio', + ) + fecha_emision_date = mm_utils.CustomMarshmallowDateField( + format='%d/%m/%Y', # e.g. 
'22/10/2018' + required=True, + load_from='Fecha Docto', + ) + monto_total = marshmallow.fields.Integer( + required=True, + load_from='Monto Total', + ) + emisor_razon_social = marshmallow.fields.String( + required=True, + load_from='Razon Social', + ) + + ########################################################################### + # fields whose value is set using data passed in the schema context + ########################################################################### + + receptor_rut = mm_fields.RutField( + required=True, + ) + receptor_razon_social = marshmallow.fields.String( + required=True, + ) + + ########################################################################### + # extra fields: not included in the returned struct + ########################################################################### + + fecha_recepcion_dt = marshmallow.fields.DateTime( + format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' + required=True, + load_from='Fecha Recepcion', + ) + + @marshmallow.pre_load + def preprocess(self, in_data: dict) -> dict: + # note: required fields checks are run later on automatically thus we may not assume that + # values of required fields (`required=True`) exist. + + # Set field value only if it was not in the input data. + in_data.setdefault('receptor_rut', self.context['receptor_rut']) + in_data.setdefault('receptor_razon_social', self.context['receptor_razon_social']) + + # Fix missing/default values. 
+ if 'Fecha Acuse' in in_data: + if in_data['Fecha Acuse'] == '': + in_data['Fecha Acuse'] = None + + return in_data + + @marshmallow.post_load + def postprocess(self, data: dict) -> dict: + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13' + data['fecha_recepcion_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_recepcion_dt'], tz=self.FIELD_FECHA_RECEPCION_DT_TZ) + # >>> data['fecha_recepcion_dt'].isoformat() + # '2018-10-23T01:54:13-03:00' + # >>> data['fecha_recepcion_dt'].astimezone(pytz.UTC).isoformat() + # '2018-10-23T04:54:13+00:00' + + # note: to express this value in another timezone (but the value does not change), do + # `dt_obj.astimezone(pytz.timezone('some timezone'))` + + return data + + ############################################################################### # helpers ############################################################################### diff --git a/tests/test_rcv_parse_csv.py b/tests/test_rcv_parse_csv.py index 6dc7741c..a03110df 100644 --- a/tests/test_rcv_parse_csv.py +++ b/tests/test_rcv_parse_csv.py @@ -1,7 +1,13 @@ import unittest from cl_sii.rcv.parse_csv import ( # noqa: F401 - RcvVentaCsvRowSchema, parse_rcv_venta_csv_file, _parse_rcv_csv_file, + RcvCompraNoIncluirCsvRowSchema, RcvCompraPendienteCsvRowSchema, + RcvCompraReclamadoCsvRowSchema, RcvCompraRegistroCsvRowSchema, + RcvVentaCsvRowSchema, + parse_rcv_compra_no_incluir_csv_file, parse_rcv_compra_pendiente_csv_file, + parse_rcv_compra_reclamado_csv_file, parse_rcv_compra_registro_csv_file, + parse_rcv_venta_csv_file, + _parse_rcv_csv_file, ) @@ -11,12 +17,52 @@ class RcvVentaCsvRowSchemaTest(unittest.TestCase): pass +class RcvCompraRegistroCsvRowSchemaTest(unittest.TestCase): + + # TODO: implement for 'RcvCompraRegistroCsvRowSchema'. + pass + + +class RcvCompraNoIncluirCsvRowSchemaTest(unittest.TestCase): + + # TODO: implement for 'RcvCompraNoIncluirCsvRowSchema'. 
pass + + +class RcvCompraReclamadoCsvRowSchemaTest(unittest.TestCase): + + # TODO: implement for 'RcvCompraReclamadoCsvRowSchema'. + pass + + +class RcvCompraPendienteCsvRowSchemaTest(unittest.TestCase): + + # TODO: implement for 'RcvCompraPendienteCsvRowSchema'. + pass + + class FunctionsTest(unittest.TestCase): def test_parse_rcv_venta_csv_file(self) -> None: # TODO: implement for 'parse_rcv_venta_csv_file'. pass + def test_parse_rcv_compra_registro_csv_file(self) -> None: + # TODO: implement for 'parse_rcv_compra_registro_csv_file'. + pass + + def test_parse_rcv_compra_no_incluir_csv_file(self) -> None: + # TODO: implement for 'parse_rcv_compra_no_incluir_csv_file'. + pass + + def test_parse_rcv_compra_reclamado_csv_file(self) -> None: + # TODO: implement for 'parse_rcv_compra_reclamado_csv_file'. + pass + + def test_parse_rcv_compra_pendiente_csv_file(self) -> None: + # TODO: implement for 'parse_rcv_compra_pendiente_csv_file'. + pass + def test__parse_rcv_csv_file(self) -> None: # TODO: implement for '_parse_rcv_csv_file'. pass From 13a76834d91763939f0ddc6578a3c50288af7865 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Fri, 31 May 2019 19:16:14 -0400 Subject: [PATCH 02/14] rcv: remove `process_rcv_csv_file` And its related code. Its name was inappropriate because it supported only a specific kind of RCV CSV file: RCV "Compra/Registro" CSV file. Also, the implementation was not very good. 
--- cl_sii/dte/parse.py | 2 +- cl_sii/rcv/__init__.py | 61 ------------- cl_sii/rcv/parse.py | 195 ---------------------------------------- tests/test_rcv.py | 10 --- tests/test_rcv_parse.py | 16 ---- 5 files changed, 1 insertion(+), 283 deletions(-) delete mode 100644 cl_sii/rcv/parse.py delete mode 100644 tests/test_rcv.py delete mode 100644 tests/test_rcv_parse.py diff --git a/cl_sii/dte/parse.py b/cl_sii/dte/parse.py index 00b6077d..39b42f1e 100644 --- a/cl_sii/dte/parse.py +++ b/cl_sii/dte/parse.py @@ -131,7 +131,7 @@ def parse_dte_xml(xml_doc: XmlElement) -> data_models.DteDataL2: # TODO: change response type to a dataclass like 'DteXmlData'. # TODO: separate the XML parsing stage from the deserialization stage, which could be # performed by XML-agnostic code (perhaps using Marshmallow or data clacases?). - # See :class:`cl_sii.rcv.parse.RcvCsvRowSchema`. + # See :class:`cl_sii.rcv.parse_csv.RcvVentaCsvRowSchema`. if not isinstance(xml_doc, (XmlElement, XmlElementTree)): raise TypeError("'xml_doc' must be an 'XmlElement'.") diff --git a/cl_sii/rcv/__init__.py b/cl_sii/rcv/__init__.py index 03ca583b..9661404c 100644 --- a/cl_sii/rcv/__init__.py +++ b/cl_sii/rcv/__init__.py @@ -9,64 +9,3 @@ http://www.sii.cl/preguntas_frecuentes/catastro/001_012_6971.htm """ -import csv -import io -from typing import Callable - -from . import parse - - -def process_rcv_csv_file( - text_stream: io.TextIOBase, - rcv_owner_rut: str, - row_data_handler: Callable, - max_data_rows: int = None, -) -> int: - """ - Process a RCV CSV file. - - Processing steps: - - Create a CSV reader, with auto-detection of header names (first row). - - Instantiate an schema to parse and deserialize each row. - - For each data row: - - Using an appropriate schema, deserialize the raw data. - - Apply ``row_data_handler`` to the deserialization output. 
- - :param text_stream: a file-like object, not necessarily a real file - :param rcv_owner_rut: RCV file owner's RUT - :param row_data_handler: function be called with parsed row data - :param max_data_rows: max number of data rows to process (raise exception if exceeded); - ``None`` means no limit - :return: number of data rows processed - - """ - # TODO: convert to iterator. That way we do not need the 'row_data_handler' and we can also use - # the same function to retrieve the collection of deserialized rows. - - csv_reader = parse.create_rcv_csv_reader(text_stream, expected_fields_strict=True) - schema = parse.RcvCsvRowSchema(context=dict(receptor_rut=rcv_owner_rut)) - - try: - for row_ix, row_data in enumerate(csv_reader, start=1): - if max_data_rows is not None and row_ix > max_data_rows: - # TODO: custom exception - raise Exception("Exceeded 'max_data_rows' value: {}.".format(max_data_rows)) - - try: - deserialized_row_data = schema.deserialize_csv_row(row_data) - except Exception as exc: - exc_msg = "Error deserializing row {} of CSV file: {}".format(row_ix, exc) - raise Exception(exc_msg) from exc - try: - row_data_handler(row_ix, deserialized_row_data) - except Exception as exc: - exc_msg = "Error in row_data_handler for row {} of CSV file: {}".format(row_ix, exc) - raise Exception(exc_msg) from exc - - # The first row in the CSV file is not a data row; it is the headers row. 
- rows_processed = csv_reader.line_num - 1 - except csv.Error as exc: - exc_msg = "CSV error for line {} of CSV file: {}".format(csv_reader.line_num, exc) - raise Exception(exc_msg) from exc - - return rows_processed diff --git a/cl_sii/rcv/parse.py b/cl_sii/rcv/parse.py deleted file mode 100644 index 8f22786a..00000000 --- a/cl_sii/rcv/parse.py +++ /dev/null @@ -1,195 +0,0 @@ -import csv -import io -from collections import OrderedDict - -import marshmallow -import marshmallow.fields -import marshmallow.validate - -from cl_sii.extras import mm_fields -from cl_sii.libs import mm_utils -from cl_sii.libs import tz_utils - - -_CSV_ROW_DICT_EXTRA_FIELDS_KEY = None -"""CSV row dict key under which the extra data in the row will be saved.""" - -_RCV_CSV_EXPECTED_FIELD_NAMES = ( - 'Nro', - 'Tipo Doc', - 'Tipo Compra', - 'RUT Proveedor', - 'Razon Social', - 'Folio', - 'Fecha Docto', - 'Fecha Recepcion', - 'Fecha Acuse', - 'Monto Exento', - 'Monto Neto', - 'Monto IVA Recuperable', - 'Monto Iva No Recuperable', - 'Codigo IVA No Rec.', - 'Monto Total', - 'Monto Neto Activo Fijo', - 'IVA Activo Fijo', - 'IVA uso Comun', - 'Impto. Sin Derecho a Credito', - 'IVA No Retenido', - 'Tabacos Puros', - 'Tabacos Cigarrillos', - 'Tabacos Elaborados', - 'NCE o NDE sobre Fact. de Compra', - 'Codigo Otro Impuesto', - 'Valor Otro Impuesto', - 'Tasa Otro Impuesto', -) -_RCV_CSV_DIALECT_KEY = 'sii_rcv' - - -class _RcvCsvDialect(csv.Dialect): - - """ - CSV dialect of RCV CSV files. - - The properties of this dialect were determined with the help of - :class:`csv.Sniffer`. - - >>> import gzip - >>> filename = 'SII-download-RCV-file-http-body-response.csv.gz' - >>> with gzip.open(filename, 'rt', encoding='utf-8') as f: - ... 
dialect = csv.Sniffer().sniff(f.read(50 * 1024)) - - """ - - delimiter = ';' - quotechar = '"' - escapechar = None - doublequote = False - skipinitialspace = False - lineterminator = '\r\n' - quoting = csv.QUOTE_MINIMAL - - -csv.register_dialect(_RCV_CSV_DIALECT_KEY, _RcvCsvDialect) - - -class RcvCsvRowSchema(marshmallow.Schema): - - EXPECTED_INPUT_FIELDS = tuple(_RCV_CSV_EXPECTED_FIELD_NAMES) + (_CSV_ROW_DICT_EXTRA_FIELDS_KEY, ) # type: ignore # noqa: E501 - FIELD_FECHA_RECEPCION_DATETIME_TZ = tz_utils.TZ_CL_SANTIAGO - - class Meta: - strict = True - - emisor_rut = mm_fields.RutField( - required=True, - load_from='RUT Proveedor', - ) - tipo_dte = marshmallow.fields.Integer( - required=True, - load_from='Tipo Doc', - ) - folio = marshmallow.fields.Integer( - required=True, - load_from='Folio', - ) - fecha_emision_date = mm_utils.CustomMarshmallowDateField( - format='%d/%m/%Y', # e.g. '22/10/2018' - required=True, - load_from='Fecha Docto', - ) - fecha_recepcion_datetime = marshmallow.fields.DateTime( - format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' - required=True, - load_from='Fecha Recepcion', - ) - # note: this field value is set using data passed in the schema context. - receptor_rut = mm_fields.RutField( - required=True, - ) - monto_total = marshmallow.fields.Integer( - required=True, - load_from='Monto Total', - ) - - @marshmallow.pre_load - def preprocess(self, in_data: dict) -> dict: - # note: required fields checks are run later on automatically thus we may not assume that - # values of required fields (`required=True`) exist. - - # Set field value only if it was not in the input data. 
- in_data.setdefault('receptor_rut', self.context['receptor_rut']) - - return in_data - - @marshmallow.post_load - def postprocess(self, data: dict) -> dict: - # >>> data['fecha_recepcion_datetime'].isoformat() - # '2018-10-23T01:54:13' - data['fecha_recepcion_datetime'] = tz_utils.convert_naive_dt_to_tz_aware( - dt=data['fecha_recepcion_datetime'], tz=self.FIELD_FECHA_RECEPCION_DATETIME_TZ) - # >>> data['fecha_recepcion_datetime'].isoformat() - # '2018-10-23T01:54:13-03:00' - # >>> data['fecha_recepcion_datetime'].astimezone(pytz.UTC).isoformat() - # '2018-10-23T04:54:13+00:00' - - # note: to express this value in another timezone (but the value does not change), do - # `datetime_obj.astimezone(pytz.timezone('some timezone'))` - - return data - - @marshmallow.validates_schema(pass_original=True) - def validate_schema(self, data: dict, original_data: dict) -> None: - # Fail validation if there was an unexpected input field. - unexpected_input_fields = ( - set(original_data) - - set(self.fields) - - set(self.EXPECTED_INPUT_FIELDS) - ) - if unexpected_input_fields: - raise marshmallow.ValidationError( - 'Unexpected input field', field_names=list(unexpected_input_fields)) - - # @marshmallow.validates('field_x') - # def validate_field_x(self, value): - # pass - - ########################################################################### - # non-marshmallow-related methods - ########################################################################### - - def deserialize_csv_row(self, row: OrderedDict) -> dict: - try: - result = self.load(row) # type: marshmallow.UnmarshalResult - except marshmallow.ValidationError as exc: - exc_msg = "Validation errors during deserialization." 
- validation_error_msgs = dict(exc.normalized_messages()) - raise ValueError(exc_msg, validation_error_msgs) from exc - - result_data = result.data # type: dict - result_errors = result.errors # type: dict - if result_errors: - raise Exception("Deserialization errors: %s", result_errors) - return result_data - - -def create_rcv_csv_reader( - text_stream: io.TextIOBase, - expected_fields_strict: bool = True, -) -> csv.DictReader: - # note: mypy wrongly complains: it does not accept 'fieldnames' to be None but that value - # is completely acceptable, and it even is the default! - # > error: Argument "fieldnames" to "DictReader" has incompatible type "None"; expected - # > "Sequence[str]" - csv_reader = csv.DictReader( # type: ignore - text_stream, - fieldnames=None, # the values of the first row will be used as the fieldnames - restkey=_CSV_ROW_DICT_EXTRA_FIELDS_KEY, - dialect=_RCV_CSV_DIALECT_KEY, - ) - if expected_fields_strict and tuple(csv_reader.fieldnames) != _RCV_CSV_EXPECTED_FIELD_NAMES: - raise Exception( - "CSV file field names do not match those expected, or their order.", - csv_reader.fieldnames) - - return csv_reader diff --git a/tests/test_rcv.py b/tests/test_rcv.py deleted file mode 100644 index 81f0d3f0..00000000 --- a/tests/test_rcv.py +++ /dev/null @@ -1,10 +0,0 @@ -import unittest - -from cl_sii.rcv import process_rcv_csv_file # noqa: F401 - - -class FunctionsTest(unittest.TestCase): - - def test_process_rcv_csv_file(self) -> None: - # TODO: implement! - pass diff --git a/tests/test_rcv_parse.py b/tests/test_rcv_parse.py deleted file mode 100644 index 8cf1812b..00000000 --- a/tests/test_rcv_parse.py +++ /dev/null @@ -1,16 +0,0 @@ -import unittest - -from cl_sii.rcv.parse import RcvCsvRowSchema, create_rcv_csv_reader # noqa: F401 - - -class RcvCsvRowSchemaTest(unittest.TestCase): - - # TODO: implement! - pass - - -class FunctionsTest(unittest.TestCase): - - def test_create_rcv_csv_reader(self) -> None: - # TODO: implement! 
- pass From ee9e35127288d5bf5901d99e17989c9bc52781b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Tue, 11 Jun 2019 18:07:06 -0400 Subject: [PATCH 03/14] libs.tz_utils: remove `TZ_CL_SANTIAGO` Reference it from `base.constants` instead. --- cl_sii/dte/data_models.py | 3 ++- cl_sii/libs/tz_utils.py | 12 ++++-------- tests/test_libs_tz_utils.py | 2 +- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/cl_sii/dte/data_models.py b/cl_sii/dte/data_models.py index beff1846..998bd3f7 100644 --- a/cl_sii/dte/data_models.py +++ b/cl_sii/dte/data_models.py @@ -22,6 +22,7 @@ import cl_sii.contribuyente.constants import cl_sii.rut.constants +from cl_sii.base.constants import SII_OFFICIAL_TZ from cl_sii.libs import tz_utils from cl_sii.rut import Rut @@ -327,7 +328,7 @@ class DteDataL2(DteDataL1): # constants ########################################################################### - DATETIME_FIELDS_TZ = tz_utils.TZ_CL_SANTIAGO + DATETIME_FIELDS_TZ = SII_OFFICIAL_TZ ########################################################################### # fields diff --git a/cl_sii/libs/tz_utils.py b/cl_sii/libs/tz_utils.py index 9d420f07..aa74826c 100644 --- a/cl_sii/libs/tz_utils.py +++ b/cl_sii/libs/tz_utils.py @@ -27,11 +27,7 @@ TZ_UTC = pytz.UTC # type: PytzTimezone -TZ_CL_SANTIAGO = pytz.timezone('America/Santiago') # type: PytzTimezone - -# TODO: remove -UTC = TZ_UTC -TIMEZONE_CL_SANTIAGO = TZ_CL_SANTIAGO +_TZ_CL_SANTIAGO: PytzTimezone = pytz.timezone('America/Santiago') def get_now_tz_aware() -> datetime: @@ -66,7 +62,7 @@ def convert_naive_dt_to_tz_aware(dt: datetime, tz: PytzTimezone) -> datetime: >>> dt_tz_aware_1.isoformat() '2018-10-23T04:54:13+00:00' - >>> dt_tz_aware_2 = convert_naive_dt_to_tz_aware(dt_naive, TZ_CL_SANTIAGO) + >>> dt_tz_aware_2 = convert_naive_dt_to_tz_aware(dt_naive, _TZ_CL_SANTIAGO) >>> dt_tz_aware_2 datetime.datetime(2018, 10, 23, 1, 54, 13, tzinfo=) @@ -91,7 +87,7 @@ def dt_is_aware(value: datetime) -> bool: 
False >>> dt_is_aware(convert_naive_dt_to_tz_aware(dt_naive, TZ_UTC)) True - >>> dt_is_aware(convert_naive_dt_to_tz_aware(dt_naive, TZ_CL_SANTIAGO)) + >>> dt_is_aware(convert_naive_dt_to_tz_aware(dt_naive, _TZ_CL_SANTIAGO)) True """ @@ -110,7 +106,7 @@ def dt_is_naive(value: datetime) -> bool: True >>> dt_is_naive(convert_naive_dt_to_tz_aware(dt_naive, TZ_UTC)) False - >>> dt_is_naive(convert_naive_dt_to_tz_aware(dt_naive, TZ_CL_SANTIAGO)) + >>> dt_is_naive(convert_naive_dt_to_tz_aware(dt_naive, _TZ_CL_SANTIAGO)) False """ diff --git a/tests/test_libs_tz_utils.py b/tests/test_libs_tz_utils.py index 4fe2145b..ad13991b 100644 --- a/tests/test_libs_tz_utils.py +++ b/tests/test_libs_tz_utils.py @@ -2,7 +2,7 @@ from cl_sii.libs.tz_utils import ( # noqa: F401 convert_naive_dt_to_tz_aware, dt_is_aware, dt_is_naive, get_now_tz_aware, - PytzTimezone, TZ_CL_SANTIAGO, TZ_UTC, + PytzTimezone, TZ_UTC, ) From b33e31a96208c82b625cd8e7909775b318c3b512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Tue, 11 Jun 2019 18:14:27 -0400 Subject: [PATCH 04/14] libs.tz_utils: add `validate_dt_tz` Move from `dte.data_models` and rename. Tests not yet implemented. 
--- cl_sii/dte/data_models.py | 9 +-------- cl_sii/libs/tz_utils.py | 10 ++++++++++ tests/test_dte_data_models.py | 4 ---- tests/test_libs_tz_utils.py | 6 +++++- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cl_sii/dte/data_models.py b/cl_sii/dte/data_models.py index 998bd3f7..ccd9b359 100644 --- a/cl_sii/dte/data_models.py +++ b/cl_sii/dte/data_models.py @@ -94,13 +94,6 @@ def validate_non_empty_bytes(value: bytes) -> None: raise ValueError("Bytes value length is 0.") -def validate_correct_tz(value: datetime, tz: tz_utils.PytzTimezone) -> None: - if not tz_utils.dt_is_aware(value): - raise ValueError("Value must be a timezone-aware datetime.", value) - if value.tzinfo.zone != tz.zone: # type: ignore - raise ValueError(f"Timezone of datetime value must be '{tz.zone!s}'.", value) - - @dataclasses.dataclass(frozen=True) class DteNaturalKey: @@ -407,7 +400,7 @@ def __post_init__(self) -> None: if self.firma_documento_dt is not None: if not isinstance(self.firma_documento_dt, datetime): raise TypeError("Inappropriate type of 'firma_documento_dt'.") - validate_correct_tz(self.firma_documento_dt, self.DATETIME_FIELDS_TZ) + tz_utils.validate_dt_tz(self.firma_documento_dt, self.DATETIME_FIELDS_TZ) if self.signature_value is not None: if not isinstance(self.signature_value, bytes): diff --git a/cl_sii/libs/tz_utils.py b/cl_sii/libs/tz_utils.py index aa74826c..8609c077 100644 --- a/cl_sii/libs/tz_utils.py +++ b/cl_sii/libs/tz_utils.py @@ -114,3 +114,13 @@ def dt_is_naive(value: datetime) -> bool: raise TypeError # source: 'django.utils.timezone.is_naive' @ Django 2.1.7 return value.utcoffset() is None + + +def validate_dt_tz(value: datetime, tz: PytzTimezone) -> None: + """ + Validate that ``tz`` is the timezone of ``value``. 
+ """ + if not dt_is_aware(value): + raise ValueError("Value must be a timezone-aware datetime object.") + if value.tzinfo.zone != tz.zone: # type: ignore + raise ValueError(f"Timezone of datetime value must be '{tz.zone!s}'.", value) diff --git a/tests/test_dte_data_models.py b/tests/test_dte_data_models.py index 01d705fb..bbb67073 100644 --- a/tests/test_dte_data_models.py +++ b/tests/test_dte_data_models.py @@ -284,7 +284,3 @@ def test_validate_non_empty_str(self) -> None: def test_validate_non_empty_bytes(self) -> None: # TODO: implement for 'validate_non_empty_bytes' pass - - def test_validate_correct_tz(self) -> None: - # TODO: implement for 'validate_correct_tz' - pass diff --git a/tests/test_libs_tz_utils.py b/tests/test_libs_tz_utils.py index ad13991b..4e1a0360 100644 --- a/tests/test_libs_tz_utils.py +++ b/tests/test_libs_tz_utils.py @@ -1,7 +1,7 @@ import unittest from cl_sii.libs.tz_utils import ( # noqa: F401 - convert_naive_dt_to_tz_aware, dt_is_aware, dt_is_naive, get_now_tz_aware, + convert_naive_dt_to_tz_aware, dt_is_aware, dt_is_naive, get_now_tz_aware, validate_dt_tz, PytzTimezone, TZ_UTC, ) @@ -27,3 +27,7 @@ def test_dt_is_naive(self) -> None: # TODO: implement for 'dt_is_naive' # Reuse doctests/examples in function docstring. pass + + def test_validate_dt_tz(self) -> None: + # TODO: implement for 'validate_dt_tz' + pass From f978f3a1de865a93df46cc36e5ce2003254105cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Tue, 7 May 2019 18:00:18 -0400 Subject: [PATCH 05/14] libs.tz_utils: add `convert_tz_aware_dt_to_naive` Tests not yet implemented. 
--- cl_sii/libs/tz_utils.py | 37 +++++++++++++++++++++++++++++++++++++ tests/test_libs_tz_utils.py | 8 +++++++- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/cl_sii/libs/tz_utils.py b/cl_sii/libs/tz_utils.py index 8609c077..58de84aa 100644 --- a/cl_sii/libs/tz_utils.py +++ b/cl_sii/libs/tz_utils.py @@ -78,6 +78,43 @@ def convert_naive_dt_to_tz_aware(dt: datetime, tz: PytzTimezone) -> datetime: return dt_tz_aware +def convert_tz_aware_dt_to_naive(dt: datetime, tz: PytzTimezone = None) -> datetime: + """ + Convert a timezone-aware datetime object to an offset-naive one. + + Default ``tz`` is UTC. + + >>> dt_tz_aware = datetime(2018, 10, 1, 2, 30, 0, tzinfo=TZ_UTC) + >>> dt_tz_aware.isoformat() + '2018-10-01T02:30:00+00:00' + + >>> dt_naive_utc = convert_tz_aware_dt_to_naive(dt_tz_aware, TZ_UTC) + >>> dt_naive_utc.isoformat() + '2018-10-01T02:30:00' + + >>> dt_naive_cl_santiago = convert_tz_aware_dt_to_naive(dt_tz_aware, _TZ_CL_SANTIAGO) + >>> dt_naive_cl_santiago.isoformat() + '2018-09-30T23:30:00' + + >>> int((dt_naive_cl_santiago - dt_naive_utc).total_seconds() / 3600) + -3 + >>> (dt_naive_cl_santiago.date() - dt_naive_utc.date()).days + -1 + + :param dt: timezone-aware datetime + :param tz: timezone e.g. ``pytz.timezone('America/Santiago')`` + :raises ValueError: if ``dt`` is not timezone-aware + + """ + if not dt_is_aware(dt): + raise ValueError("Value must be a timezone-aware datetime object.") + + if tz is None: + tz = TZ_UTC + dt_naive = dt.astimezone(tz).replace(tzinfo=None) # type: datetime + return dt_naive + + def dt_is_aware(value: datetime) -> bool: """ Return whether datetime ``value`` is "aware". 
diff --git a/tests/test_libs_tz_utils.py b/tests/test_libs_tz_utils.py index 4e1a0360..a685c5a5 100644 --- a/tests/test_libs_tz_utils.py +++ b/tests/test_libs_tz_utils.py @@ -1,7 +1,8 @@ import unittest from cl_sii.libs.tz_utils import ( # noqa: F401 - convert_naive_dt_to_tz_aware, dt_is_aware, dt_is_naive, get_now_tz_aware, validate_dt_tz, + convert_naive_dt_to_tz_aware, convert_tz_aware_dt_to_naive, + dt_is_aware, dt_is_naive, get_now_tz_aware, validate_dt_tz, PytzTimezone, TZ_UTC, ) @@ -18,6 +19,11 @@ def test_convert_naive_dt_to_tz_aware(self) -> None: # Reuse doctests/examples in function docstring. pass + def test_convert_tz_aware_dt_to_naive(self) -> None: + # TODO: implement for 'convert_tz_aware_dt_to_naive' + # Reuse doctests/examples in function docstring. + pass + def test_dt_is_aware(self) -> None: # TODO: implement for 'dt_is_aware' # Reuse doctests/examples in function docstring. From ab37a41a6227cd70238763036e22920eacf52ce8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Tue, 11 Jun 2019 18:04:42 -0400 Subject: [PATCH 06/14] add module `base.constants` --- cl_sii/base/__init__.py | 0 cl_sii/base/constants.py | 13 +++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 cl_sii/base/__init__.py create mode 100644 cl_sii/base/constants.py diff --git a/cl_sii/base/__init__.py b/cl_sii/base/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cl_sii/base/constants.py b/cl_sii/base/constants.py new file mode 100644 index 00000000..4f5cd864 --- /dev/null +++ b/cl_sii/base/constants.py @@ -0,0 +1,13 @@ +""" +Base / constants +================ + +""" +import pytz + +from cl_sii.libs.tz_utils import PytzTimezone + + +TZ_CL_SANTIAGO: PytzTimezone = pytz.timezone('America/Santiago') + +SII_OFFICIAL_TZ = TZ_CL_SANTIAGO From 93bd7e2d4893c2ebb963c87afcb6c68455bc1cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Fri, 31 May 2019 19:38:19 -0400 Subject: [PATCH 07/14] rcv: add 
module `constants` --- cl_sii/rcv/__init__.py | 11 +- cl_sii/rcv/constants.py | 292 ++++++++++++++++++++++++++++++++++++ tests/test_rcv_constants.py | 125 +++++++++++++++ 3 files changed, 423 insertions(+), 5 deletions(-) create mode 100644 cl_sii/rcv/constants.py create mode 100644 tests/test_rcv_constants.py diff --git a/cl_sii/rcv/__init__.py b/cl_sii/rcv/__init__.py index 9661404c..1d68554e 100644 --- a/cl_sii/rcv/__init__.py +++ b/cl_sii/rcv/__init__.py @@ -1,11 +1,12 @@ """ -SII RCV ("Registro de Compras y Ventas"). +SII RCV ("Registro de Compras y Ventas") +======================================== -.. note:: - The RCV ("Registro de Compras y Ventas") is composed of 2 "registros": - RC ("Registro de Compras") and RV ("Registro de Ventas"). +The RCV ("Registro de Compras y Ventas") is composed of 2 "registros": +RC ("Registro de Compras") and RV ("Registro de Ventas"). .. seealso:: - http://www.sii.cl/preguntas_frecuentes/catastro/001_012_6971.htm + See SII / FAQ / + `¿Qué es el Registro de Compras y Ventas (RCV)? `_ # noqa: E501 """ diff --git a/cl_sii/rcv/constants.py b/cl_sii/rcv/constants.py new file mode 100644 index 00000000..ce4a94e6 --- /dev/null +++ b/cl_sii/rcv/constants.py @@ -0,0 +1,292 @@ +import enum + +from ..dte.constants import TipoDteEnum + + +@enum.unique +class RcvKind(enum.Enum): + """ + The kind of RCV. + + Definitions: + + * Registro de Compras (RC): + > [..] todas las operaciones de compras realizadas por un contribuyente + > de acuerdo a los DTEs recepcionados por el SII, complementado con los + > documentos tributarios de compras, en soporte distinto al electrónico, + > en el cual deberá indicarse la naturaleza de las operaciones en + > cuanto a la procedencia e identificación del crédito fiscal. + + * Registro de Ventas (RV): + > [..] todas las operaciones de ventas realizadas por un contribuyente + > de acuerdo a los DTEs recibidos en el SII. + + Definitions source: RCV_FAQ_ + + .. 
_RCV_FAQ: http://www.sii.cl/ayudas/ayudas_por_servicios/rcv_faqs.pdf + + """ + + COMPRAS = 'COMPRAS' + """RCV / compras.""" + + VENTAS = 'VENTAS' + """RCV / ventas.""" + + +@enum.unique +class RcEstadoContable(enum.Enum): + """ + The "Estado Contable" of a "Registro de compras" (RC). + + Applies to ``RcvKind.COMPRAS``. + + Definitions: + + * "Registro": + > [..] los Documentos Tributarios Electrónicos (DTE) y no Electrónicos + > que conforman la Información de Compras válida, la cual se utiliza para + > la determinación impositiva y es considerada como el registro oficial + > del Contribuyente y respaldo de su contabilidad. + + * "No incluir": + > [..] los Documentos Tributarios Electrónicos (DTE) y no Electrónicos + > que no deben incluirse en el Registro de Compras, en virtud de + > corresponder a compras y pagos de servicios que no tienen relación con + > las actividades económicas del contribuyente y por lo tanto no deben + > afectar la determinación impositiva. + + * "Reclamado(s)": + > [..] los Documentos Tributarios Electrónicos (DTE) que han sido + > reclamados por el mismo receptor en el Registro de Aceptación o Reclamo + > de un DTE. + > Por encontrarse en estado "Reclamado", no es posible ingresar estos + > documentos en el Registro de Compras vigente o considerarse para la + > determinación impositiva. + + * "Pendiente(s)": + > [..] los Documentos Tributarios Electrónicos (DTE) que han sido + > recibidos en el SII, pero que se encuentran pendientes de otorgarse el + > Acuse de Recibo o entregarse un Reclamo por parte del receptor [..]. + + Definitions source: RCV_web_app_ + + .. _RCV_web_app: https://www4.sii.cl/consdcvinternetui/#/index + + """ + + REGISTRO = 'REGISTRO' + NO_INCLUIR = 'NO_INCLUIR' + RECLAMADO = 'RECLAMADO' + PENDIENTE = 'PENDIENTE' + + +@enum.unique +class RcvTipoDocto(enum.IntEnum): + + """ + Enum of "Tipo de Documento" for the RCV domain. 
+ + Unlike :class:`cl_sii.dte.constants.TipoDteEnum` this collection is not + restricted to "documentos electrónicos". However, this is not a superset + of the latter (e.g. "Guía electrónica de despacho" (52) is in + ``TipoDteEnum`` but not in ``RcvTipoDocto``). + + Sources: + + * XML type (enum) ``DoctoType`` ("Tipos de Documentos") in + official schema ``LibroCV_v10.xsd``. + https://github.com/fyndata/lib-cl-sii-python/blob/f57a326/cl_sii/data/ref/factura_electronica/schemas-xml/LibroCV_v10.xsd#L1563-L1622 + + * Values returned by SII endpoint + https://www4.sii.cl/consdcvinternetui/services/data/facadeService/getDatosInicio + + * Constant ``cl_sii_api.rcv.AVAILABLE_DOCUMENT_TYPES`` in package + ``cl-sii-api`` v0.2.2. + https://github.com/fynpal/lib-cl-sii-api-python/blob/81b4a43/cl_sii_api/rcv.py + + """ + + ########################################################################### + # "facturas" + ########################################################################### + + FACTURA_INICIO = 29 + """Factura de inicio.""" + + FACTURA = 30 + """Factura.""" + + FACTURA_ELECTRONICA = 33 + """Factura electrónica de venta.""" + + FACTURA_NO_AFECTA_O_EXENTA = 32 + """Factura de venta, no afecta o exenta de IVA.""" + # aka 'Factura no Afecta o Exenta' + # aka 'Factura de Venta de Bienes y Servicios No afectos o Exento de IVA' + + FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA = 34 + """Factura electrónica de venta, no afecta o exenta de IVA.""" + # aka 'Factura no Afecta o Exenta Electrónica' + # aka 'Factura Electrónica de Venta de Bienes y Servicios No afectos o Exento de IVA' + + FACTURA_COMPRA = 45 + """Factura de compra.""" + + FACTURA_COMPRA_ELECTRONICA = 46 + """Factura electrónica de compra.""" + # aka 'Factura de Compra Electrónica' + # Name should have been 'Factura Electrónica de Compra'. 
+ + FACTURA_EXPORTACION = 101 + """Factura de Exportación.""" + + FACTURA_EXPORTACION_ELECTRONICA = 110 + """Factura Electrónica de Exportación.""" + # aka 'Factura de Exportación Electrónica' + # Name should have been 'Factura Electrónica de Exportación'. + + ########################################################################### + # "notas" + ########################################################################### + + NOTA_DEBITO = 55 + """Nota de débito.""" + + NOTA_DEBITO_ELECTRONICA = 56 + """Nota electrónica de débito.""" + # aka 'Nota de Débito Electrónica' + + NOTA_CREDITO = 60 + """Nota de crédito.""" + + NOTA_CREDITO_ELECTRONICA = 61 + """Nota electrónica de crédito.""" + # aka 'Nota de Crédito Electrónica' + + NOTA_DEBITO_EXPORTACION = 104 + """Nota de débito de exportación.""" + + NOTA_DEBITO_EXPORTACION_ELECTRONICA = 111 + """Nota electrónica de débito de exportación.""" + + NOTA_CREDITO_EXPORTACION = 106 + """Nota de crédito de exportación.""" + + NOTA_CREDITO_EXPORTACION_ELECTRONICA = 112 + """Nota electrónica de crédito de exportación.""" + + ########################################################################### + # "liquidación-factura" + ########################################################################### + + # For more info about a "liquidación-factura" see: + # http://www.sii.cl/preguntas_frecuentes/catastro/001_012_0247.htm + # http://www.sii.cl/preguntas_frecuentes/catastro/001_012_3689.htm + + LIQUIDACION_FACTURA = 40 + """Liquidación-Factura.""" + + LIQUIDACION_FACTURA_ELECTRONICA = 43 + """Liquidación-Factura Electrónica.""" + + ########################################################################### + # "Total Op. del mes Boleta X" + ########################################################################### + + TOTAL_OP_DEL_MES_BOLETA_AFECTA = 35 + """Total Oper. del mes Boleta Afecta.""" + + TOTAL_OP_DEL_MES_BOLETA_EXENTA = 38 + """Total Oper. 
del mes Boleta Exenta.""" + + TOTAL_OP_DEL_MES_BOLETA_EXENTA_ELECTR = 41 + """Total Op. del mes Boleta Exenta Electr.""" + + TOTAL_OP_DEL_MES_BOLETA_ELECTR = 39 + """Total Oper. del mes Boleta Electr.""" + + ########################################################################### + # uncommon + ########################################################################### + + TIPO_47 = 47 + """Total del mes Vale Electrónico Especial.""" + + TIPO_48 = 48 + """Total mes Comprobantes Pago Electrónico.""" + + TIPO_102 = 102 + """Factura vta. Exenta a Zona Franca Prim.""" + + TIPO_103 = 103 + """Liquidación.""" + + TIPO_105 = 105 + """Total Op. mes Boleta Liq. Res. 1423/76.""" + + TIPO_108 = 108 + """SRF Solicitud Registro de Factura.""" + + TIPO_109 = 109 + """Factura Turista.""" + + TIPO_901 = 901 + """Fact. Vta. Emp. Terr. Pref. Res. 1057/85.""" + + TIPO_902 = 902 + """Conocimiento Embarque Marítimo o Aéreo.""" + + TIPO_903 = 903 + """Documento Único de Salida (DUS).""" + + TIPO_904 = 904 + """Factura de Traspaso.""" + + TIPO_905 = 905 + """Factura de Reexpedición.""" + + TIPO_906 = 906 + """Total Op. del mes Boleta Vta. Módulo ZF.""" + + TIPO_907 = 907 + """Facturas Venta Módulo ZF.""" + + TIPO_909 = 909 + """Facturas Venta Módulo ZF.""" + + TIPO_910 = 910 + """Solicitud Traslado Zona Franca (ZF).""" + + TIPO_911 = 911 + """Decl. de Ingreso a Zona Franca Primaria.""" + + TIPO_914 = 914 + """Declaración de Ingreso (DIN).""" + + TIPO_919 = 919 + """Resumen Vtas. Pasajes Nac. sin Factura.""" + + TIPO_920 = 920 + """Otros registros no Docum. Aumenta Débito.""" + + TIPO_922 = 922 + """Otros registros no Doc. Disminuye Débito.""" + + TIPO_924 = 924 + """Resumen Vtas. Pasajes Inter. sin Fact.""" + + def as_tipo_dte(self) -> TipoDteEnum: + """ + Return equivalent "Tipo DTE". 
+ + :raises ValueError: if there is no equivalent one + + """ + try: + value = TipoDteEnum(self.value) + except ValueError as exc: + raise ValueError( + f"There is no equivalent 'TipoDteEnum' for 'RcvTipoDocto.{self.name}'.") from exc + + return value diff --git a/tests/test_rcv_constants.py b/tests/test_rcv_constants.py new file mode 100644 index 00000000..6c810479 --- /dev/null +++ b/tests/test_rcv_constants.py @@ -0,0 +1,125 @@ +import unittest + +from cl_sii.dte.constants import TipoDteEnum # noqa: F401 +from cl_sii.rcv import constants # noqa: F401 +from cl_sii.rcv.constants import RcEstadoContable, RcvKind, RcvTipoDocto # noqa: F401 + + +class RcvKindTest(unittest.TestCase): + + def test_members(self): + self.assertSetEqual( + {x for x in RcvKind}, + { + RcvKind.COMPRAS, + RcvKind.VENTAS, + } + ) + + def test_values_type(self): + self.assertSetEqual( + {type(x.value) for x in RcvKind}, + {str} + ) + + +class RcEstadoContableTest(unittest.TestCase): + + def test_members(self): + self.assertSetEqual( + {x for x in RcEstadoContable}, + { + RcEstadoContable.REGISTRO, + RcEstadoContable.NO_INCLUIR, + RcEstadoContable.RECLAMADO, + RcEstadoContable.PENDIENTE, + } + ) + + def test_values_type(self): + self.assertSetEqual( + {type(x.value) for x in RcEstadoContable}, + {str} + ) + + +class RcvTipoDoctoTest(unittest.TestCase): + + def test_members(self): + self.assertSetEqual( + {x for x in RcvTipoDocto}, + { + RcvTipoDocto.FACTURA_INICIO, + RcvTipoDocto.FACTURA, + RcvTipoDocto.FACTURA_ELECTRONICA, + RcvTipoDocto.FACTURA_NO_AFECTA_O_EXENTA, + RcvTipoDocto.FACTURA_NO_AFECTA_O_EXENTA_ELECTRONICA, + RcvTipoDocto.FACTURA_COMPRA, + RcvTipoDocto.FACTURA_COMPRA_ELECTRONICA, + RcvTipoDocto.FACTURA_EXPORTACION, + RcvTipoDocto.FACTURA_EXPORTACION_ELECTRONICA, + + RcvTipoDocto.NOTA_DEBITO, + RcvTipoDocto.NOTA_DEBITO_ELECTRONICA, + RcvTipoDocto.NOTA_CREDITO, + RcvTipoDocto.NOTA_CREDITO_ELECTRONICA, + RcvTipoDocto.NOTA_DEBITO_EXPORTACION, + 
RcvTipoDocto.NOTA_DEBITO_EXPORTACION_ELECTRONICA, + RcvTipoDocto.NOTA_CREDITO_EXPORTACION, + RcvTipoDocto.NOTA_CREDITO_EXPORTACION_ELECTRONICA, + + RcvTipoDocto.LIQUIDACION_FACTURA, + RcvTipoDocto.LIQUIDACION_FACTURA_ELECTRONICA, + + RcvTipoDocto.TOTAL_OP_DEL_MES_BOLETA_AFECTA, + RcvTipoDocto.TOTAL_OP_DEL_MES_BOLETA_EXENTA, + RcvTipoDocto.TOTAL_OP_DEL_MES_BOLETA_EXENTA_ELECTR, + RcvTipoDocto.TOTAL_OP_DEL_MES_BOLETA_ELECTR, + + RcvTipoDocto.TIPO_47, + RcvTipoDocto.TIPO_48, + RcvTipoDocto.TIPO_102, + RcvTipoDocto.TIPO_103, + RcvTipoDocto.TIPO_105, + RcvTipoDocto.TIPO_108, + RcvTipoDocto.TIPO_109, + RcvTipoDocto.TIPO_901, + RcvTipoDocto.TIPO_902, + RcvTipoDocto.TIPO_903, + RcvTipoDocto.TIPO_904, + RcvTipoDocto.TIPO_905, + RcvTipoDocto.TIPO_906, + RcvTipoDocto.TIPO_907, + RcvTipoDocto.TIPO_909, + RcvTipoDocto.TIPO_910, + RcvTipoDocto.TIPO_911, + RcvTipoDocto.TIPO_914, + RcvTipoDocto.TIPO_919, + RcvTipoDocto.TIPO_920, + RcvTipoDocto.TIPO_922, + RcvTipoDocto.TIPO_924, + } + ) + + def test_values_type(self): + self.assertSetEqual( + {type(x.value) for x in RcvTipoDocto}, + {int} + ) + + def test_of_some_member(self): + value = RcvTipoDocto.FACTURA_ELECTRONICA + + self.assertEqual(value.name, 'FACTURA_ELECTRONICA') + self.assertEqual(value.value, 33) + + def test_as_tipo_dte(self): + self.assertEqual( + RcvTipoDocto.FACTURA_ELECTRONICA.as_tipo_dte(), + TipoDteEnum.FACTURA_ELECTRONICA) + + with self.assertRaises(ValueError) as cm: + RcvTipoDocto.FACTURA.as_tipo_dte() + self.assertEqual( + cm.exception.args, + ("There is no equivalent 'TipoDteEnum' for 'RcvTipoDocto.FACTURA'.", )) From d99b0eef439ab3ffc5ab656f12eadbb416392cc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Sat, 8 Jun 2019 21:17:10 -0400 Subject: [PATCH 08/14] extras.mm_fields: add `RcvTipoDoctoField` Marshmallow field for RCV's "tipo documento". 
--- cl_sii/extras/mm_fields.py | 54 ++++++++++++ tests/test_extras_mm_fields.py | 151 ++++++++++++++++++++++++++++++++- 2 files changed, 204 insertions(+), 1 deletion(-) diff --git a/cl_sii/extras/mm_fields.py b/cl_sii/extras/mm_fields.py index 59b17535..772edb68 100644 --- a/cl_sii/extras/mm_fields.py +++ b/cl_sii/extras/mm_fields.py @@ -14,6 +14,7 @@ import marshmallow.fields from cl_sii.dte.constants import TipoDteEnum +from cl_sii.rcv.constants import RcvTipoDocto from cl_sii.rut import Rut @@ -117,3 +118,56 @@ def _validated(self, value: Optional[object]) -> Optional[TipoDteEnum]: # TipoDteEnum('x') raises 'ValueError', not 'TypeError' self.fail('invalid') return validated + + +class RcvTipoDoctoField(marshmallow.fields.Field): + + """ + Marshmallow field for RCV's "tipo documento". + + Data types: + * native/primitive/internal/deserialized: :class:`RcvTipoDocto` + * representation/serialized: int, same as for Marshmallow field + :class:`marshmallow.fields.Integer` + + The field performs some input value cleaning when it is an str; + for example ``' 33 \t '`` is allowed and the resulting value + is ``RcvTipoDocto(33)``. + + Implementation almost identical to :class:`TipoDteField`. + + """ + + default_error_messages = { + 'invalid': "Not a valid RCV's Tipo de Documento." + } + + def _serialize(self, value: Optional[object], attr: str, obj: object) -> Optional[int]: + validated: Optional[RcvTipoDocto] = self._validated(value) + return validated.value if validated is not None else None + + def _deserialize(self, value: object, attr: str, data: dict) -> Optional[RcvTipoDocto]: + return self._validated(value) + + def _validated(self, value: Optional[object]) -> Optional[RcvTipoDocto]: + if value is None or isinstance(value, RcvTipoDocto): + validated = value + else: + if isinstance(value, bool): + # is value is bool, `isinstance(value, int)` is True and `int(value)` works! 
+ self.fail('type') + try: + value = int(value) # type: ignore + except ValueError: + # `int('x')` raises 'ValueError', not 'TypeError' + self.fail('type') + except TypeError: + # `int(date(2018, 10, 10))` raises 'TypeError', unlike `int('x')` + self.fail('type') + + try: + validated = RcvTipoDocto(value) # type: ignore + except ValueError: + # RcvTipoDocto('x') raises 'ValueError', not 'TypeError' + self.fail('invalid') + return validated diff --git a/tests/test_extras_mm_fields.py b/tests/test_extras_mm_fields.py index 9253eefd..9714de37 100644 --- a/tests/test_extras_mm_fields.py +++ b/tests/test_extras_mm_fields.py @@ -3,7 +3,11 @@ import marshmallow -from cl_sii.extras.mm_fields import Rut, RutField, TipoDteEnum, TipoDteField +from cl_sii.extras.mm_fields import ( + RcvTipoDocto, RcvTipoDoctoField, + Rut, RutField, + TipoDteEnum, TipoDteField, +) class RutFieldTest(unittest.TestCase): @@ -290,3 +294,148 @@ def test_dump_fail(self) -> None: self.assertDictEqual(errors, {'tipo_dte': ['Invalid input type.']}) data, errors = schema.dump(obj_invalid_5) self.assertDictEqual(errors, {'tipo_dte': ['Invalid input type.']}) + + +class RcvTipoDoctoFieldTest(unittest.TestCase): + + def setUp(self) -> None: + + class MyObj: + def __init__(self, tipo_docto: RcvTipoDocto, other_field: int = None) -> None: + self.tipo_docto = tipo_docto + self.other_field = other_field + + class MyBadObj: + def __init__(self, some_field: int) -> None: + self.some_field = some_field + + class MyMmSchema(marshmallow.Schema): + + class Meta: + strict = False + + tipo_docto = RcvTipoDoctoField( + required=True, + load_from='source field name', + ) + other_field = marshmallow.fields.Integer( + required=False, + ) + + class MyMmSchemaStrict(marshmallow.Schema): + + class Meta: + strict = True + + tipo_docto = RcvTipoDoctoField( + required=True, + load_from='source field name', + ) + other_field = marshmallow.fields.Integer( + required=False, + ) + + self.MyObj = MyObj + self.MyBadObj = MyBadObj + 
self.MyMmSchema = MyMmSchema + self.MyMmSchemaStrict = MyMmSchemaStrict + + def test_load_ok_valid(self) -> None: + schema = self.MyMmSchema() + + data_valid_1 = {'source field name': 33} + data_valid_2 = {'source field name': RcvTipoDocto(33)} + data_valid_3 = {'source field name': ' 33 \t '} + + result = schema.load(data_valid_1) + self.assertDictEqual(dict(result.data), {'tipo_docto': RcvTipoDocto(33)}) + self.assertDictEqual(dict(result.errors), {}) + + result = schema.load(data_valid_2) + self.assertDictEqual(dict(result.data), {'tipo_docto': RcvTipoDocto(33)}) + self.assertDictEqual(dict(result.errors), {}) + + result = schema.load(data_valid_3) + self.assertDictEqual(dict(result.data), {'tipo_docto': RcvTipoDocto(33)}) + self.assertDictEqual(dict(result.errors), {}) + + def test_dump_ok_valid(self) -> None: + schema = self.MyMmSchema() + + obj_valid_1 = self.MyObj(tipo_docto=RcvTipoDocto(33)) + obj_valid_2 = self.MyObj(tipo_docto=None) + + data, errors = schema.dump(obj_valid_1) + self.assertDictEqual(data, {'tipo_docto': 33, 'other_field': None}) + self.assertDictEqual(errors, {}) + + data, errors = schema.dump(obj_valid_2) + self.assertDictEqual(data, {'tipo_docto': None, 'other_field': None}) + self.assertDictEqual(errors, {}) + + def test_dump_ok_strange(self) -> None: + # If the class of the object to be dumped has attributes that do not match at all the + # fields of the schema, there are no errors! Even if the schema has `strict = True` set. 
+ + schema = self.MyMmSchema() + schema_strict = self.MyMmSchemaStrict() + + obj_valid_1 = self.MyBadObj(some_field=123) + obj_valid_2 = self.MyBadObj(some_field=None) + + data, errors = schema.dump(obj_valid_1) + self.assertEqual((data, errors), ({}, {})) + + data, errors = schema_strict.dump(obj_valid_1) + self.assertEqual((data, errors), ({}, {})) + + data, errors = schema.dump(obj_valid_2) + self.assertEqual((data, errors), ({}, {})) + + data, errors = schema_strict.dump(obj_valid_2) + self.assertEqual((data, errors), ({}, {})) + + def test_load_fail(self) -> None: + + schema = self.MyMmSchema() + + data_invalid_1 = {'source field name': '123'} + data_invalid_2 = {'source field name': True} + data_invalid_3 = {'source field name': None} + data_invalid_4 = {} + + result = schema.load(data_invalid_1) + self.assertDictEqual(dict(result.data), {}) + self.assertDictEqual(dict(result.errors), {'source field name': ["Not a valid RCV's Tipo de Documento."]}) # noqa: E501 + + result = schema.load(data_invalid_2) + self.assertDictEqual(dict(result.data), {}) + self.assertDictEqual(dict(result.errors), {'source field name': ['Invalid input type.']}) + + result = schema.load(data_invalid_3) + self.assertDictEqual(dict(result.data), {}) + self.assertDictEqual(dict(result.errors), {'source field name': ['Field may not be null.']}) + + result = schema.load(data_invalid_4) + self.assertDictEqual(dict(result.data), {}) + self.assertDictEqual(dict(result.errors), {'source field name': ['Missing data for required field.']}) # noqa: E501 + + def test_dump_fail(self) -> None: + schema = self.MyMmSchema() + + obj_invalid_1 = self.MyObj(tipo_docto=100) + obj_invalid_2 = self.MyObj(tipo_docto=True) + obj_invalid_3 = self.MyObj(tipo_docto='FACTURA_ELECTRONICA') + obj_invalid_4 = self.MyObj(tipo_docto='') + obj_invalid_5 = self.MyObj(tipo_docto=date(2018, 12, 23)) + + data, errors = schema.dump(obj_invalid_1) + self.assertDictEqual(errors, {'tipo_docto': ["Not a valid RCV's Tipo de 
Documento."]}) + data, errors = schema.dump(obj_invalid_2) + self.assertDictEqual(errors, {'tipo_docto': ['Invalid input type.']}) + data, errors = schema.dump(obj_invalid_3) + self.assertDictEqual(errors, {'tipo_docto': ['Invalid input type.']}) + data, errors = schema.dump(obj_invalid_4) + self.assertDictEqual(errors, {'tipo_docto': ['Invalid input type.']}) + data, errors = schema.dump(obj_invalid_5) + self.assertDictEqual(errors, {'tipo_docto': ['Invalid input type.']}) From ffd1d5765cf432abfafed06b1eb63aa88c85e566 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Mon, 10 Jun 2019 16:42:53 -0400 Subject: [PATCH 09/14] rcv: add module `data_models` --- cl_sii/rcv/data_models.py | 327 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 327 insertions(+) create mode 100644 cl_sii/rcv/data_models.py diff --git a/cl_sii/rcv/data_models.py b/cl_sii/rcv/data_models.py new file mode 100644 index 00000000..c25625b4 --- /dev/null +++ b/cl_sii/rcv/data_models.py @@ -0,0 +1,327 @@ +""" +RCV data models +=============== + + +""" +from __future__ import annotations + +import dataclasses +import logging +from dataclasses import field as dc_field +from datetime import date, datetime +from typing import Optional + +import cl_sii.dte.data_models +from cl_sii.base.constants import SII_OFFICIAL_TZ +from cl_sii.libs import tz_utils +from cl_sii.rut import Rut + +from .constants import RcEstadoContable, RcvKind, RcvTipoDocto + + +logger = logging.getLogger(__name__) + + +@dataclasses.dataclass(frozen=True) +class PeriodoTributario: + + year: int = dc_field() + month: int = dc_field() + + def __post_init__(self) -> None: + if not isinstance(self.year, int): + raise TypeError("Inappropriate type of 'year'.") + if self.year < 1900: # arbitrary number but it more useful than checking not < 1. 
+ raise ValueError("Value is out of the valid range for 'year'.") + + if not isinstance(self.month, int): + raise TypeError("Inappropriate type of 'month'.") + if self.month < 1 or self.month > 12: + raise ValueError("Value is out of the valid range for 'month'.") + + ########################################################################### + # dunder/magic methods + ########################################################################### + + def __str__(self) -> str: + # 'YYYY-MM' e.g. '2018-03' + return f"{self.year}-{self.month:02d}" + + def __lt__(self, other: PeriodoTributario) -> bool: + return self.as_date() < other.as_date() + + def __le__(self, other: PeriodoTributario) -> bool: + return self.as_date() <= other.as_date() + + ########################################################################### + # custom methods + ########################################################################### + + @property + def is_in_the_future(self) -> bool: + return self.as_datetime() > tz_utils.get_now_tz_aware() + + @classmethod + def from_date(cls, value: date) -> PeriodoTributario: + return PeriodoTributario(year=value.year, month=value.month) + + @classmethod + def from_datetime(cls, value: datetime) -> PeriodoTributario: + value_naive = tz_utils.convert_tz_aware_dt_to_naive(value, SII_OFFICIAL_TZ) + return cls.from_date(value_naive.date()) + + def as_date(self) -> date: + return date(self.year, self.month, day=1) + + def as_datetime(self) -> datetime: + # note: timezone-aware + return datetime(self.year, self.month, day=1, hour=0, minute=0, second=0).replace( + tzinfo=SII_OFFICIAL_TZ) + + +@dataclasses.dataclass(frozen=True) +class RcvDetalleEntry: + + """ + Entry of the "detalle" of an RCV. 
+ """ + + ########################################################################### + # constants + ########################################################################### + + # note: as of Python 3.7.3 we can not do something like `RCV_KIND: Optional[RcvKind] = None` + # because 'dataclasses' gets confused and assumes that that class attribute is a dataclass + # field (it is not), and this error is triggered: + # > TypeError: non-default argument 'my_dc_field' follows default argument + + RCV_KIND = None # type: Optional[RcvKind] + RC_ESTADO_CONTABLE = None # type: Optional[RcEstadoContable] + + ########################################################################### + # fields + ########################################################################### + + emisor_rut: Rut = dc_field() + """ + RUT of the "emisor" of the "documento". + """ + + tipo_docto: RcvTipoDocto = dc_field() + """ + The kind of "documento". + """ + + folio: int = dc_field() + """ + The sequential number of a "documento". + """ + + # TODO: docstring + fecha_emision_date: date = dc_field() + + # TODO: docstring + # TODO: can it be None? What happens for those "tipo docto" that do not have a receptor? + receptor_rut: Rut = dc_field() + + monto_total: int = dc_field() + """ + Total amount of the "documento". + """ + + emisor_razon_social: str = dc_field() + """ + "Razón social" (legal name) of the "emisor" of the "documento". + """ + + # TODO: docstring + # TODO: can it be None? What happens for those "tipo docto" that do not have a receptor? + receptor_razon_social: str = dc_field() + + # TODO: docstring + # note: must be timezone-aware. + fecha_recepcion_dt: datetime = dc_field() + + def __post_init__(self) -> None: + """ + Run validation automatically after setting the fields values. 
+ + :raises TypeError, ValueError: + + """ + if self.RCV_KIND == RcvKind.COMPRAS: + if self.RC_ESTADO_CONTABLE is None: + raise ValueError( + "'RC_ESTADO_CONTABLE' must not be None when 'RCV_KIND' is 'COMPRAS'.") + elif self.RCV_KIND == RcvKind.VENTAS: + if self.RC_ESTADO_CONTABLE is not None: + raise ValueError( + "'RC_ESTADO_CONTABLE' must be None when 'RCV_KIND' is 'VENTAS'.") + + if not isinstance(self.emisor_rut, Rut): + raise TypeError("Inappropriate type of 'emisor_rut'.") + + if not isinstance(self.tipo_docto, RcvTipoDocto): + raise TypeError("Inappropriate type of 'tipo_docto'.") + + if not isinstance(self.folio, int): + raise TypeError("Inappropriate type of 'folio'.") + if not self.folio > 0: + raise ValueError("Inappropriate value of 'folio'.") + + if not isinstance(self.fecha_emision_date, date): + raise TypeError("Inappropriate type of 'fecha_emision_date'.") + + if not isinstance(self.receptor_rut, Rut): + raise TypeError("Inappropriate type of 'receptor_rut'.") + + # TODO: figure out validation rules of 'monto_total' + if not isinstance(self.monto_total, int): + raise TypeError("Inappropriate type of 'monto_total'.") + + if not isinstance(self.emisor_razon_social, str): + raise TypeError("Inappropriate type of 'emisor_razon_social'.") + cl_sii.dte.data_models.validate_contribuyente_razon_social(self.emisor_razon_social) + + if not isinstance(self.receptor_razon_social, str): + raise TypeError("Inappropriate type of 'receptor_razon_social'.") + cl_sii.dte.data_models.validate_contribuyente_razon_social(self.receptor_razon_social) + + if not isinstance(self.fecha_recepcion_dt, datetime): + raise TypeError("Inappropriate type of 'fecha_recepcion_dt'.") + tz_utils.validate_dt_tz(self.fecha_recepcion_dt, SII_OFFICIAL_TZ) + + @property + def is_dte(self) -> bool: + try: + self.tipo_docto.as_tipo_dte() + except ValueError: + return False + return True + + def as_dte_data_l2(self) -> cl_sii.dte.data_models.DteDataL2: + try: + tipo_dte = 
self.tipo_docto.as_tipo_dte() + + dte_data = cl_sii.dte.data_models.DteDataL2( + emisor_rut=self.emisor_rut, + tipo_dte=tipo_dte, + folio=self.folio, + fecha_emision_date=self.fecha_emision_date, + receptor_rut=self.receptor_rut, + monto_total=self.monto_total, + emisor_razon_social=self.emisor_razon_social, + receptor_razon_social=self.receptor_razon_social, + # fecha_vencimiento_date='', + # firma_documento_dt='', + # signature_value='', + # signature_x509_cert_der='', + # emisor_giro='', + # emisor_email='', + # receptor_email='', + ) + except (TypeError, ValueError): + raise + + return dte_data + + +@dataclasses.dataclass(frozen=True) +class RvDetalleEntry(RcvDetalleEntry): + + """ + Entry of the "detalle" of an RV ("Registro de Ventas"). + """ + + RCV_KIND = RcvKind.VENTAS + RC_ESTADO_CONTABLE = None + + # TODO: docstring + # note: must be timezone-aware. + fecha_acuse_dt: Optional[datetime] = dc_field() + + # TODO: docstring + # note: must be timezone-aware. + fecha_reclamo_dt: Optional[datetime] = dc_field() + + def __post_init__(self) -> None: + super().__post_init__() + + if self.fecha_acuse_dt is not None: + if not isinstance(self.fecha_acuse_dt, datetime): + raise TypeError("Inappropriate type of 'fecha_acuse_dt'.") + tz_utils.validate_dt_tz(self.fecha_acuse_dt, SII_OFFICIAL_TZ) + + if self.fecha_reclamo_dt is not None: + if not isinstance(self.fecha_reclamo_dt, datetime): + raise TypeError("Inappropriate type of 'fecha_reclamo_dt'.") + tz_utils.validate_dt_tz(self.fecha_reclamo_dt, SII_OFFICIAL_TZ) + + +@dataclasses.dataclass(frozen=True) +class RcRegistroDetalleEntry(RcvDetalleEntry): + + """ + Entry of the "detalle" of an RC ("Registro de Compras") / "registro". + """ + + RCV_KIND = RcvKind.COMPRAS + RC_ESTADO_CONTABLE = RcEstadoContable.REGISTRO + + # TODO: docstring + # note: must be timezone-aware. 
+ fecha_acuse_dt: Optional[datetime] = dc_field() + + def __post_init__(self) -> None: + super().__post_init__() + + if self.fecha_acuse_dt is not None: + if not isinstance(self.fecha_acuse_dt, datetime): + raise TypeError("Inappropriate type of 'fecha_acuse_dt'.") + tz_utils.validate_dt_tz(self.fecha_acuse_dt, SII_OFFICIAL_TZ) + + +@dataclasses.dataclass(frozen=True) +class RcNoIncluirDetalleEntry(RcRegistroDetalleEntry): + + """ + Entry of the "detalle" of an RC ("Registro de Compras") / "no incluir". + """ + + RCV_KIND = RcvKind.COMPRAS + RC_ESTADO_CONTABLE = RcEstadoContable.NO_INCLUIR + + +@dataclasses.dataclass(frozen=True) +class RcReclamadoDetalleEntry(RcvDetalleEntry): + + """ + Entry of the "detalle" of an RC ("Registro de Compras") / "reclamado". + """ + + RCV_KIND = RcvKind.COMPRAS + RC_ESTADO_CONTABLE = RcEstadoContable.RECLAMADO + + # TODO: docstring + # note: must be timezone-aware. + fecha_reclamo_dt: Optional[datetime] = dc_field() + + def __post_init__(self) -> None: + super().__post_init__() + + if self.fecha_reclamo_dt is not None: + if not isinstance(self.fecha_reclamo_dt, datetime): + raise TypeError("Inappropriate type of 'fecha_reclamo_dt'.") + tz_utils.validate_dt_tz(self.fecha_reclamo_dt, SII_OFFICIAL_TZ) + + +@dataclasses.dataclass(frozen=True) +class RcPendienteDetalleEntry(RcvDetalleEntry): + + """ + Entry of the "detalle" of an RC ("Registro de Compras") / "pendiente". 
+ """ + + RCV_KIND = RcvKind.COMPRAS + RC_ESTADO_CONTABLE = RcEstadoContable.PENDIENTE From d070abf5deab5bd7433f132e0760cb539016fec5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Wed, 12 Jun 2019 20:23:26 -0400 Subject: [PATCH 10/14] libs: add module `io_utils` Include functions: - `with_mode_binary` - `with_mode_text` - `with_encoding_utf8` --- cl_sii/libs/io_utils.py | 71 +++++++++++++++ tests/test_libs_io_utils.py | 170 ++++++++++++++++++++++++++++++++++++ 2 files changed, 241 insertions(+) create mode 100644 cl_sii/libs/io_utils.py create mode 100644 tests/test_libs_io_utils.py diff --git a/cl_sii/libs/io_utils.py b/cl_sii/libs/io_utils.py new file mode 100644 index 00000000..a69ce6a3 --- /dev/null +++ b/cl_sii/libs/io_utils.py @@ -0,0 +1,71 @@ +import codecs +import io +from typing import IO + + +# notes: +# - For streams and modes see 'io.open()' +# - Stream classes have a pretty strange 'typing'/ABC/inheritance/etc arrangement because, +# among others, they are implemented in C. +# - Use `IO[X]` for arguments and `TextIO`/`BinaryIO` for return types (says GVR). +# https://github.com/python/typing/issues/518#issuecomment-350903120 + + +def with_mode_binary(stream: IO) -> bool: + """ + Return whether ``stream`` is a binary stream (i.e. reads bytes). + """ + result = False + try: + result = 'b' in stream.mode + except AttributeError: + if isinstance(stream, (io.RawIOBase, io.BufferedIOBase, io.BytesIO)): + result = True + + return result + + +def with_mode_text(stream: IO) -> bool: + """ + Return whether ``stream`` is a text stream (i.e. reads strings). + """ + result = False + try: + result = 't' in stream.mode + except AttributeError: + if isinstance(stream, (io.TextIOBase, io.TextIOWrapper, io.StringIO)): + result = True + + return result + + +def with_encoding_utf8(text_stream: IO[str]) -> bool: + """ + Return whether ``text_stream`` is a text stream with encoding set to UTF-8. 
+ + :raises TypeError: if ``text_stream`` is not a text stream + + """ + result = False + + if isinstance(text_stream, io.StringIO): + # note: 'StringIO' saves (unicode) strings in memory and therefore doesn't have (or need) + # an encoding, which is fine. + # https://stackoverflow.com/questions/9368865/io-stringio-encoding-in-python3/9368909#9368909 + result = True + else: + try: + text_stream_encoding: str = text_stream.encoding # type: ignore + except AttributeError as exc: + raise TypeError("Value is not a text stream.") from exc + if text_stream_encoding is None: + # e.g. the strange case of `tempfile.SpooledTemporaryFile(mode='rt', encoding='utf-8')` + pass + else: + try: + text_stream_encoding_norm = codecs.lookup(text_stream_encoding).name + result = text_stream_encoding_norm == 'utf-8' + except LookupError: + pass + + return result diff --git a/tests/test_libs_io_utils.py b/tests/test_libs_io_utils.py new file mode 100644 index 00000000..7a850b3e --- /dev/null +++ b/tests/test_libs_io_utils.py @@ -0,0 +1,170 @@ +import io +import pathlib +import tempfile +import unittest + +from cl_sii.libs.io_utils import with_encoding_utf8, with_mode_binary, with_mode_text # noqa: F401 + + +class FunctionsTest(unittest.TestCase): + + def test_with_encoding_utf8(self): + filename = pathlib.Path(__file__).with_name('test_libs_io_utils-test-file-1.tmp') + filename.touch() + + # Binary mode + + with open(str(filename), mode='rb') as f: + self.assertTrue(isinstance(f, io.BufferedReader)) + with self.assertRaises(TypeError): + with_encoding_utf8(f) + + with open(str(filename), mode='wb') as f: + self.assertTrue(isinstance(f, io.BufferedWriter)) + with self.assertRaises(TypeError): + with_encoding_utf8(f) + + with open(str(filename), mode='w+b') as f: + self.assertTrue(isinstance(f, io.BufferedRandom)) + with self.assertRaises(TypeError): + with_encoding_utf8(f) + + with io.BytesIO() as f: + self.assertTrue(isinstance(f, io.BytesIO)) + with self.assertRaises(TypeError): + 
with_encoding_utf8(f) + + with tempfile.NamedTemporaryFile() as f: + self.assertTrue(isinstance(f, tempfile._TemporaryFileWrapper)) + with self.assertRaises(TypeError): + with_encoding_utf8(f) + + with tempfile.SpooledTemporaryFile() as f: + self.assertTrue(isinstance(f, tempfile.SpooledTemporaryFile)) + with self.assertRaises(TypeError): + with_encoding_utf8(f) + + # Text mode - encoding 'utf-8' + + with open(str(filename), mode='rt', encoding='utf-8') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertTrue(with_encoding_utf8(f)) + + with open(str(filename), mode='wt', encoding='utf-8') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertTrue(with_encoding_utf8(f)) + + with open(str(filename), mode='w+t', encoding='utf-8') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertTrue(with_encoding_utf8(f)) + + with io.StringIO() as f: + # note: has no encoding + self.assertTrue(isinstance(f, io.StringIO)) + self.assertTrue(with_encoding_utf8(f)) + + with tempfile.NamedTemporaryFile(mode='rt', encoding='utf-8') as f: + self.assertTrue(isinstance(f, tempfile._TemporaryFileWrapper)) + self.assertTrue(with_encoding_utf8(f)) + + with tempfile.SpooledTemporaryFile(mode='rt', encoding='utf-8') as f: + self.assertTrue(isinstance(f, tempfile.SpooledTemporaryFile)) + # note: this is a strange case. 
+ self.assertFalse(with_encoding_utf8(f)) + + # Text mode - encoding 'latin1' + + with open(str(filename), mode='rt', encoding='latin1') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_encoding_utf8(f)) + + with open(str(filename), mode='wt', encoding='latin1') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_encoding_utf8(f)) + + with open(str(filename), mode='w+t', encoding='latin1') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_encoding_utf8(f)) + + with tempfile.NamedTemporaryFile(mode='rt', encoding='latin1') as f: + self.assertTrue(isinstance(f, tempfile._TemporaryFileWrapper)) + self.assertFalse(with_encoding_utf8(f)) + + with tempfile.SpooledTemporaryFile(mode='rt', encoding='latin1') as f: + self.assertTrue(isinstance(f, tempfile.SpooledTemporaryFile)) + self.assertFalse(with_encoding_utf8(f)) + + filename.unlink() + + def test_with_mode_x(self): + # For the sake of simplicity test here both 'with_mode_binary' and 'with_mode_text'. 
+ + filename = pathlib.Path(__file__).with_name('test_libs_io_utils-test-file-2.tmp') + filename.touch() + + # Binary mode + + with open(str(filename), mode='rb') as f: + self.assertTrue(isinstance(f, io.BufferedReader)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + with open(str(filename), mode='wb') as f: + self.assertTrue(isinstance(f, io.BufferedWriter)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + with open(str(filename), mode='w+b') as f: + self.assertTrue(isinstance(f, io.BufferedRandom)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + with io.BytesIO() as f: + self.assertTrue(isinstance(f, io.BytesIO)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + with tempfile.NamedTemporaryFile() as f: + + self.assertTrue(isinstance(f, tempfile._TemporaryFileWrapper)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + with tempfile.SpooledTemporaryFile() as f: + self.assertTrue(isinstance(f, tempfile.SpooledTemporaryFile)) + self.assertTrue(with_mode_binary(f)) + self.assertFalse(with_mode_text(f)) + + # Text mode + + with open(str(filename), mode='rt') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + with open(str(filename), mode='wt') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + with open(str(filename), mode='w+t') as f: + self.assertTrue(isinstance(f, io.TextIOWrapper)) + self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + with io.StringIO() as f: + self.assertTrue(isinstance(f, io.StringIO)) + self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + with tempfile.NamedTemporaryFile(mode='rt') as f: + self.assertTrue(isinstance(f, tempfile._TemporaryFileWrapper)) + 
self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + with tempfile.SpooledTemporaryFile(mode='rt') as f: + self.assertTrue(isinstance(f, tempfile.SpooledTemporaryFile)) + self.assertFalse(with_mode_binary(f)) + self.assertTrue(with_mode_text(f)) + + filename.unlink() From 130671805567ec8fb1b74202e1ed3033973aa47d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Mon, 10 Jun 2019 20:24:08 -0400 Subject: [PATCH 11/14] rcv.parse_csv: significant changes to parse functions - Now they yield dataclass `RcvDetalleEntry` instances instead of dataclass `DteDataL2` instances. - Params `{emisor, receptor}_{rut, razon_social}` renamed to `rut` and `razon_social` respectively. - `parse_rcv_venta_csv_file`: fields `Fecha Acuse Recibo` and `Fecha Reclamo` are not ignored anymore. - Marshmallow schemas' field `tipo_dte` replaced by `tipo_docto`. - Marshmallow schemas' method `to_dte_data_l2` replaced by `to_detalle_entry`. --- cl_sii/rcv/parse_csv.py | 373 +++++++++++++++++++++++++++------------- 1 file changed, 258 insertions(+), 115 deletions(-) diff --git a/cl_sii/rcv/parse_csv.py b/cl_sii/rcv/parse_csv.py index 12abf4c7..a4ac484c 100644 --- a/cl_sii/rcv/parse_csv.py +++ b/cl_sii/rcv/parse_csv.py @@ -5,15 +5,15 @@ """ import csv -from datetime import date import logging +from datetime import date, datetime from typing import Dict, Iterable, Optional, Sequence, Tuple import marshmallow import marshmallow.fields import marshmallow.validate -from cl_sii.dte.data_models import DteDataL2 +from cl_sii.base.constants import SII_OFFICIAL_TZ from cl_sii.extras import mm_fields from cl_sii.libs import csv_utils from cl_sii.libs import mm_utils @@ -21,40 +21,44 @@ from cl_sii.libs import tz_utils from cl_sii.rut import Rut +from .data_models import ( + RcvDetalleEntry, RcNoIncluirDetalleEntry, + RcPendienteDetalleEntry, RcReclamadoDetalleEntry, + RcRegistroDetalleEntry, RvDetalleEntry, +) + logger = logging.getLogger(__name__) def 
parse_rcv_venta_csv_file( - emisor_rut: Rut, - emisor_razon_social: str, + rut: Rut, + razon_social: str, input_file_path: str, n_rows_offset: int = 0, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RvDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV "Venta" file (CSV). + Parse entries from an RV ("Registro de Ventas") (CSV file). """ schema_context = dict( - emisor_rut=emisor_rut, - emisor_razon_social=emisor_razon_social, + emisor_rut=rut, + emisor_razon_social=razon_social, ) input_csv_row_schema = RcvVentaCsvRowSchema(context=schema_context) expected_input_field_names = ( 'Nro', - 'Tipo Doc', # 'tipo_dte' + 'Tipo Doc', # 'tipo_docto' 'Tipo Venta', 'Rut cliente', # 'receptor_rut' 'Razon Social', # 'receptor_razon_social' 'Folio', # 'folio' 'Fecha Docto', # 'fecha_emision_date' 'Fecha Recepcion', # 'fecha_recepcion_dt' - # 'Fecha Acuse Recibo', # 'fecha_acuse_recibo_dt' - 'Fecha Acuse Recibo', - # 'Fecha Reclamo', # 'fecha_reclamo_dt' - 'Fecha Reclamo', + 'Fecha Acuse Recibo', # 'fecha_acuse_dt' + 'Fecha Reclamo', # 'fecha_reclamo_dt' 'Monto Exento', 'Monto Neto', 'Monto IVA', @@ -93,8 +97,6 @@ def parse_rcv_venta_csv_file( fields_to_remove_names = ( 'Nro', 'Tipo Venta', - 'Fecha Acuse Recibo', - 'Fecha Reclamo', 'Monto Exento', 'Monto Neto', 'Monto IVA', @@ -129,7 +131,9 @@ def parse_rcv_venta_csv_file( 'Tasa Otro Imp.', ) - yield from _parse_rcv_csv_file( + # note: mypy will complain about returned dataclass type mismatch (and it is right to do so) + # but we know from logic which subclass of 'RcvDetalleEntry' will be yielded. 
+ yield from _parse_rcv_csv_file( # type: ignore input_csv_row_schema, expected_input_field_names, fields_to_remove_names, @@ -140,25 +144,25 @@ def parse_rcv_venta_csv_file( def parse_rcv_compra_registro_csv_file( - receptor_rut: Rut, - receptor_razon_social: str, + rut: Rut, + razon_social: str, input_file_path: str, n_rows_offset: int = 0, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RcRegistroDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV "Compra/Registro" file (CSV). + Parse entries from an RC ("Registro de Compras") / "registro" (CSV file). """ schema_context = dict( - receptor_rut=receptor_rut, - receptor_razon_social=receptor_razon_social, + receptor_rut=rut, + receptor_razon_social=razon_social, ) input_csv_row_schema = RcvCompraRegistroCsvRowSchema(context=schema_context) expected_input_field_names = ( 'Nro', - 'Tipo Doc', # 'tipo_dte' + 'Tipo Doc', # 'tipo_docto' 'Tipo Compra', 'RUT Proveedor', # 'emisor_rut' 'Razon Social', # 'emisor_razon_social' @@ -208,7 +212,9 @@ def parse_rcv_compra_registro_csv_file( 'Tasa Otro Impuesto', ) - yield from _parse_rcv_csv_file( + # note: mypy will complain about returned dataclass type mismatch (and it is right to do so) + # but we know from logic which subclass of 'RcvDetalleEntry' will be yielded. 
+ yield from _parse_rcv_csv_file( # type: ignore input_csv_row_schema, expected_input_field_names, fields_to_remove_names, @@ -219,25 +225,25 @@ def parse_rcv_compra_registro_csv_file( def parse_rcv_compra_no_incluir_csv_file( - receptor_rut: Rut, - receptor_razon_social: str, + rut: Rut, + razon_social: str, input_file_path: str, n_rows_offset: int = 0, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RcNoIncluirDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV "Compra/no incluir" file (CSV). + Parse entries from an RC ("Registro de Compras") / "no incluir" (CSV file). """ schema_context = dict( - receptor_rut=receptor_rut, - receptor_razon_social=receptor_razon_social, + receptor_rut=rut, + receptor_razon_social=razon_social, ) input_csv_row_schema = RcvCompraNoIncluirCsvRowSchema(context=schema_context) expected_input_field_names = ( 'Nro', - 'Tipo Doc', # 'tipo_dte' + 'Tipo Doc', # 'tipo_docto' 'Tipo Compra', 'RUT Proveedor', # 'emisor_rut' 'Razon Social', # 'emisor_razon_social' @@ -281,7 +287,9 @@ def parse_rcv_compra_no_incluir_csv_file( 'Tasa Otro Impuesto', ) - yield from _parse_rcv_csv_file( + # note: mypy will complain about returned dataclass type mismatch (and it is right to do so) + # but we know from logic which subclass of 'RcvDetalleEntry' will be yielded. 
+ yield from _parse_rcv_csv_file( # type: ignore input_csv_row_schema, expected_input_field_names, fields_to_remove_names, @@ -292,25 +300,25 @@ def parse_rcv_compra_no_incluir_csv_file( def parse_rcv_compra_reclamado_csv_file( - receptor_rut: Rut, - receptor_razon_social: str, + rut: Rut, + razon_social: str, input_file_path: str, n_rows_offset: int = 0, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RcReclamadoDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV "Compra/reclamado" file (CSV). + Parse entries from an RC ("Registro de Compras") / "reclamado" (CSV file). """ schema_context = dict( - receptor_rut=receptor_rut, - receptor_razon_social=receptor_razon_social, + receptor_rut=rut, + receptor_razon_social=razon_social, ) input_csv_row_schema = RcvCompraReclamadoCsvRowSchema(context=schema_context) expected_input_field_names = ( 'Nro', - 'Tipo Doc', # 'tipo_dte' + 'Tipo Doc', # 'tipo_docto' 'Tipo Compra', 'RUT Proveedor', # 'emisor_rut' 'Razon Social', # 'emisor_razon_social' @@ -354,7 +362,9 @@ def parse_rcv_compra_reclamado_csv_file( 'Tasa Otro Impuesto', ) - yield from _parse_rcv_csv_file( + # note: mypy will complain about returned dataclass type mismatch (and it is right to do so) + # but we know from logic which subclass of 'RcvDetalleEntry' will be yielded. 
+ yield from _parse_rcv_csv_file( # type: ignore input_csv_row_schema, expected_input_field_names, fields_to_remove_names, @@ -365,25 +375,25 @@ def parse_rcv_compra_reclamado_csv_file( def parse_rcv_compra_pendiente_csv_file( - receptor_rut: Rut, - receptor_razon_social: str, + rut: Rut, + razon_social: str, input_file_path: str, n_rows_offset: int = 0, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RcPendienteDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV "Compra/pendiente" file (CSV). + Parse entries from an RC ("Registro de Compras") / "pendiente" (CSV file). """ schema_context = dict( - receptor_rut=receptor_rut, - receptor_razon_social=receptor_razon_social, + receptor_rut=rut, + receptor_razon_social=razon_social, ) input_csv_row_schema = RcvCompraPendienteCsvRowSchema(context=schema_context) expected_input_field_names = ( 'Nro', - 'Tipo Doc', # 'tipo_dte' + 'Tipo Doc', # 'tipo_docto' 'Tipo Compra', 'RUT Proveedor', # 'emisor_rut' 'Razon Social', # 'emisor_razon_social' @@ -426,7 +436,9 @@ def parse_rcv_compra_pendiente_csv_file( 'Tasa Otro Impuesto', ) - yield from _parse_rcv_csv_file( + # note: mypy will complain about returned dataclass type mismatch (and it is right to do so) + # but we know from logic which subclass of 'RcvDetalleEntry' will be yielded. + yield from _parse_rcv_csv_file( # type: ignore input_csv_row_schema, expected_input_field_names, fields_to_remove_names, @@ -450,50 +462,15 @@ def validate_schema(self, data: dict, original_data: dict) -> None: # def validate_field_x(self, value): # pass - def to_dte_data_l2(self, data: dict) -> DteDataL2: - # note: the data of some serializer fields may not be included in the returned struct. 
- - try: - emisor_rut: Rut = data['emisor_rut'] # type: ignore - receptor_rut: Rut = data['receptor_rut'] # type: ignore - tipo_dte = data['tipo_dte'] # type: ignore - folio: int = data['folio'] # type: ignore - fecha_emision_date: date = data['fecha_emision_date'] # type: ignore - monto_total: int = data['monto_total'] # type: ignore - emisor_razon_social: str = data['emisor_razon_social'] # type: ignore - receptor_razon_social: str = data['receptor_razon_social'] # type: ignore - except KeyError as exc: - raise ValueError("Programming error: a referenced field is missing.") from exc - - try: - dte_data = DteDataL2( - emisor_rut=emisor_rut, - tipo_dte=tipo_dte, - folio=folio, - fecha_emision_date=fecha_emision_date, - receptor_rut=receptor_rut, - monto_total=monto_total, - emisor_razon_social=emisor_razon_social, - receptor_razon_social=receptor_razon_social, - # fecha_vencimiento_date='', - # firma_documento_dt='', - # signature_value='', - # signature_x509_cert_der='', - # emisor_giro='', - # emisor_email='', - # receptor_email='', - ) - except (TypeError, ValueError): - raise - - return dte_data + def to_detalle_entry(self, data: dict) -> RcvDetalleEntry: + raise NotImplementedError class RcvVentaCsvRowSchema(_RcvCsvRowSchemaBase): - FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ - FIELD_FECHA_ACUSE_RECIBO_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ - FIELD_FECHA_RECLAMO_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_RECEPCION_DT_TZ = SII_OFFICIAL_TZ + FIELD_FECHA_ACUSE_DT_TZ = SII_OFFICIAL_TZ + FIELD_FECHA_RECLAMO_DT_TZ = SII_OFFICIAL_TZ class Meta: strict = True @@ -502,7 +479,7 @@ class Meta: # basic fields ########################################################################### - tipo_dte = mm_fields.TipoDteField( + tipo_docto = mm_fields.RcvTipoDoctoField( required=True, load_from='Tipo Doc', ) @@ -548,7 +525,7 @@ class Meta: required=True, load_from='Fecha Recepcion', ) - fecha_acuse_recibo_dt = marshmallow.fields.DateTime( + fecha_acuse_dt = 
marshmallow.fields.DateTime( format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' required=False, allow_none=True, @@ -594,20 +571,55 @@ def postprocess(self, data: dict) -> dict: # note: to express this value in another timezone (but the value does not change), do # `dt_obj.astimezone(pytz.timezone('some timezone'))` - if 'fecha_acuse_recibo_dt' in data and data['fecha_acuse_recibo_dt']: - data['fecha_acuse_recibo_dt'] = tz_utils.convert_naive_dt_to_tz_aware( - dt=data['fecha_acuse_recibo_dt'], tz=self.FIELD_FECHA_ACUSE_RECIBO_DT_TZ) + if 'fecha_acuse_dt' in data and data['fecha_acuse_dt']: + data['fecha_acuse_dt'] = tz_utils.convert_naive_dt_to_tz_aware( + dt=data['fecha_acuse_dt'], tz=self.FIELD_FECHA_ACUSE_DT_TZ) if 'fecha_reclamo_dt' in data and data['fecha_reclamo_dt']: data['fecha_reclamo_dt'] = tz_utils.convert_naive_dt_to_tz_aware( dt=data['fecha_reclamo_dt'], tz=self.FIELD_FECHA_RECLAMO_DT_TZ) return data + def to_detalle_entry(self, data: dict) -> RvDetalleEntry: + try: + emisor_rut: Rut = data['emisor_rut'] # type: ignore + tipo_docto = data['tipo_docto'] # type: ignore + folio: int = data['folio'] # type: ignore + fecha_emision_date: date = data['fecha_emision_date'] # type: ignore + receptor_rut: Rut = data['receptor_rut'] # type: ignore + monto_total: int = data['monto_total'] # type: ignore + emisor_razon_social: str = data['emisor_razon_social'] # type: ignore + receptor_razon_social: str = data['receptor_razon_social'] # type: ignore + fecha_recepcion_dt: datetime = data['fecha_recepcion_dt'] # type: ignore + fecha_acuse_dt: Optional[datetime] = data['fecha_acuse_dt'] # type: ignore + fecha_reclamo_dt: Optional[datetime] = data['fecha_reclamo_dt'] # type: ignore + except KeyError as exc: + raise ValueError("Programming error: a referenced field is missing.") from exc + + try: + detalle_entry = RvDetalleEntry( + emisor_rut=emisor_rut, + tipo_docto=tipo_docto, + folio=folio, + fecha_emision_date=fecha_emision_date, + receptor_rut=receptor_rut, + 
monto_total=monto_total, + emisor_razon_social=emisor_razon_social, + receptor_razon_social=receptor_razon_social, + fecha_recepcion_dt=fecha_recepcion_dt, + fecha_acuse_dt=fecha_acuse_dt, + fecha_reclamo_dt=fecha_reclamo_dt, + ) + except (TypeError, ValueError): + raise + + return detalle_entry + class RcvCompraRegistroCsvRowSchema(_RcvCsvRowSchemaBase): - FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ - FIELD_FECHA_ACUSE_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_RECEPCION_DT_TZ = SII_OFFICIAL_TZ + FIELD_FECHA_ACUSE_DT_TZ = SII_OFFICIAL_TZ class Meta: strict = True @@ -620,7 +632,7 @@ class Meta: required=True, load_from='RUT Proveedor', ) - tipo_dte = mm_fields.TipoDteField( + tipo_docto = mm_fields.RcvTipoDoctoField( required=True, load_from='Tipo Doc', ) @@ -705,14 +717,80 @@ def postprocess(self, data: dict) -> dict: return data + def to_detalle_entry(self, data: dict) -> RcRegistroDetalleEntry: + try: + emisor_rut: Rut = data['emisor_rut'] # type: ignore + tipo_docto = data['tipo_docto'] # type: ignore + folio: int = data['folio'] # type: ignore + fecha_emision_date: date = data['fecha_emision_date'] # type: ignore + receptor_rut: Rut = data['receptor_rut'] # type: ignore + monto_total: int = data['monto_total'] # type: ignore + emisor_razon_social: str = data['emisor_razon_social'] # type: ignore + receptor_razon_social: str = data['receptor_razon_social'] # type: ignore + fecha_recepcion_dt: datetime = data['fecha_recepcion_dt'] # type: ignore + fecha_acuse_dt: Optional[datetime] = data['fecha_acuse_dt'] # type: ignore + except KeyError as exc: + raise ValueError("Programming error: a referenced field is missing.") from exc + + try: + detalle_entry = RcRegistroDetalleEntry( + emisor_rut=emisor_rut, + tipo_docto=tipo_docto, + folio=folio, + fecha_emision_date=fecha_emision_date, + receptor_rut=receptor_rut, + monto_total=monto_total, + emisor_razon_social=emisor_razon_social, + receptor_razon_social=receptor_razon_social, + 
fecha_recepcion_dt=fecha_recepcion_dt, + fecha_acuse_dt=fecha_acuse_dt, + ) + except (TypeError, ValueError): + raise + + return detalle_entry + + +class RcvCompraNoIncluirCsvRowSchema(RcvCompraRegistroCsvRowSchema): + + def to_detalle_entry(self, data: dict) -> RcNoIncluirDetalleEntry: + try: + emisor_rut: Rut = data['emisor_rut'] # type: ignore + tipo_docto = data['tipo_docto'] # type: ignore + folio: int = data['folio'] # type: ignore + fecha_emision_date: date = data['fecha_emision_date'] # type: ignore + receptor_rut: Rut = data['receptor_rut'] # type: ignore + monto_total: int = data['monto_total'] # type: ignore + emisor_razon_social: str = data['emisor_razon_social'] # type: ignore + receptor_razon_social: str = data['receptor_razon_social'] # type: ignore + fecha_recepcion_dt: datetime = data['fecha_recepcion_dt'] # type: ignore + fecha_acuse_dt: Optional[datetime] = data['fecha_acuse_dt'] # type: ignore + except KeyError as exc: + raise ValueError("Programming error: a referenced field is missing.") from exc + + try: + detalle_entry = RcNoIncluirDetalleEntry( + emisor_rut=emisor_rut, + tipo_docto=tipo_docto, + folio=folio, + fecha_emision_date=fecha_emision_date, + receptor_rut=receptor_rut, + monto_total=monto_total, + emisor_razon_social=emisor_razon_social, + receptor_razon_social=receptor_razon_social, + fecha_recepcion_dt=fecha_recepcion_dt, + fecha_acuse_dt=fecha_acuse_dt, + ) + except (TypeError, ValueError): + raise -RcvCompraNoIncluirCsvRowSchema = RcvCompraRegistroCsvRowSchema + return detalle_entry class RcvCompraReclamadoCsvRowSchema(_RcvCsvRowSchemaBase): - FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ - FIELD_FECHA_RECLAMO_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_RECEPCION_DT_TZ = SII_OFFICIAL_TZ + FIELD_FECHA_RECLAMO_DT_TZ = SII_OFFICIAL_TZ class Meta: strict = True @@ -725,7 +803,7 @@ class Meta: required=True, load_from='RUT Proveedor', ) - tipo_dte = mm_fields.TipoDteField( + tipo_docto = 
mm_fields.RcvTipoDoctoField( required=True, load_from='Tipo Doc', ) @@ -768,8 +846,9 @@ class Meta: load_from='Fecha Recepcion', ) fecha_reclamo_dt = marshmallow.fields.DateTime( - # note: for some reason the DTEs with `tipo_dte=` - # (and maybe others as well) do not have this field set (always? we do not know). + # note: for some reason the rows with 'tipo_docto' equal to + # '' (and maybe others as well) do not + # have this field set (always? we do not know). format='%d/%m/%Y %H:%M:%S', # e.g. '23/10/2018 01:54:13' required=False, allow_none=True, @@ -786,8 +865,9 @@ def preprocess(self, in_data: dict) -> dict: in_data.setdefault('receptor_razon_social', self.context['receptor_razon_social']) # Fix missing/default values. - # note: for some reason the DTEs with `tipo_dte=` - # (and maybe others as well) do not have this field set (always? we do not know). + # note: for some reason the rows with 'tipo_docto' equal to + # '' (and maybe others as well) do not + # have this field set (always? we do not know). 
if 'Fecha Reclamo' in in_data: if in_data['Fecha Reclamo'] == '' or 'null' in in_data['Fecha Reclamo']: in_data['Fecha Reclamo'] = None @@ -814,11 +894,44 @@ def postprocess(self, data: dict) -> dict: return data + def to_detalle_entry(self, data: dict) -> RcReclamadoDetalleEntry: + try: + emisor_rut: Rut = data['emisor_rut'] # type: ignore + tipo_docto = data['tipo_docto'] # type: ignore + folio: int = data['folio'] # type: ignore + fecha_emision_date: date = data['fecha_emision_date'] # type: ignore + receptor_rut: Rut = data['receptor_rut'] # type: ignore + monto_total: int = data['monto_total'] # type: ignore + emisor_razon_social: str = data['emisor_razon_social'] # type: ignore + receptor_razon_social: str = data['receptor_razon_social'] # type: ignore + fecha_recepcion_dt: datetime = data['fecha_recepcion_dt'] # type: ignore + fecha_reclamo_dt: Optional[datetime] = data['fecha_reclamo_dt'] # type: ignore + except KeyError as exc: + raise ValueError("Programming error: a referenced field is missing.") from exc + + try: + detalle_entry = RcReclamadoDetalleEntry( + emisor_rut=emisor_rut, + tipo_docto=tipo_docto, + folio=folio, + fecha_emision_date=fecha_emision_date, + receptor_rut=receptor_rut, + monto_total=monto_total, + emisor_razon_social=emisor_razon_social, + receptor_razon_social=receptor_razon_social, + fecha_recepcion_dt=fecha_recepcion_dt, + fecha_reclamo_dt=fecha_reclamo_dt, + ) + except (TypeError, ValueError): + raise + + return detalle_entry + class RcvCompraPendienteCsvRowSchema(_RcvCsvRowSchemaBase): - FIELD_FECHA_RECEPCION_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ - FIELD_FECHA_ACUSE_DT_TZ = DteDataL2.DATETIME_FIELDS_TZ + FIELD_FECHA_RECEPCION_DT_TZ = SII_OFFICIAL_TZ + FIELD_FECHA_ACUSE_DT_TZ = SII_OFFICIAL_TZ class Meta: strict = True @@ -831,7 +944,7 @@ class Meta: required=True, load_from='RUT Proveedor', ) - tipo_dte = mm_fields.TipoDteField( + tipo_docto = mm_fields.RcvTipoDoctoField( required=True, load_from='Tipo Doc', ) @@ -906,6 +1019,37 
@@ def postprocess(self, data: dict) -> dict: return data + def to_detalle_entry(self, data: dict) -> RcPendienteDetalleEntry: + try: + emisor_rut: Rut = data['emisor_rut'] # type: ignore + tipo_docto = data['tipo_docto'] # type: ignore + folio: int = data['folio'] # type: ignore + fecha_emision_date: date = data['fecha_emision_date'] # type: ignore + receptor_rut: Rut = data['receptor_rut'] # type: ignore + monto_total: int = data['monto_total'] # type: ignore + emisor_razon_social: str = data['emisor_razon_social'] # type: ignore + receptor_razon_social: str = data['receptor_razon_social'] # type: ignore + fecha_recepcion_dt: datetime = data['fecha_recepcion_dt'] # type: ignore + except KeyError as exc: + raise ValueError("Programming error: a referenced field is missing.") from exc + + try: + detalle_entry = RcPendienteDetalleEntry( + emisor_rut=emisor_rut, + tipo_docto=tipo_docto, + folio=folio, + fecha_emision_date=fecha_emision_date, + receptor_rut=receptor_rut, + monto_total=monto_total, + emisor_razon_social=emisor_razon_social, + receptor_razon_social=receptor_razon_social, + fecha_recepcion_dt=fecha_recepcion_dt, + ) + except (TypeError, ValueError): + raise + + return detalle_entry + ############################################################################### # helpers @@ -942,11 +1086,12 @@ def _parse_rcv_csv_file( input_file_path: str, n_rows_offset: int, max_n_rows: int = None, -) -> Iterable[Tuple[Optional[DteDataL2], int, Dict[str, object], Dict[str, object]]]: +) -> Iterable[Tuple[Optional[RcvDetalleEntry], int, Dict[str, object], Dict[str, object]]]: """ - Parse DTE data objects from a RCV file (CSV). + Parse entries from an RC or RV (CSV file). - Common implementation for the different kinds of RCV files (CSV). + Common implementation for the different alternatives that depend on the + kind of RC and RV. 
""" for field_to_remove_name in fields_to_remove_names: @@ -982,15 +1127,13 @@ def _parse_rcv_csv_file( ) for row_ix, row_data, deserialized_row_data, validation_errors in g: - logger.debug("Processing row %s. Content: %s", row_ix, repr(row_data)) - - dte_data = None + entry: Optional[RcvDetalleEntry] = None row_errors: Dict[str, object] = {} conversion_error = None if not validation_errors: try: - dte_data = input_csv_row_schema.to_dte_data_l2(deserialized_row_data) + entry = input_csv_row_schema.to_detalle_entry(deserialized_row_data) except Exception as exc: conversion_error = str(exc) logger.exception( @@ -1003,4 +1146,4 @@ def _parse_rcv_csv_file( if conversion_error: row_errors['other'] = conversion_error - yield dte_data, row_ix, row_data, row_errors + yield entry, row_ix, row_data, row_errors From 932aded679df28275169bb25b8576c48f976ba14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Thu, 13 Jun 2019 12:41:53 -0400 Subject: [PATCH 12/14] rcv: add `RcvKind.is_estado_contable_compatible` --- cl_sii/rcv/constants.py | 16 ++++++++++++++++ tests/test_rcv_constants.py | 13 +++++++++++++ 2 files changed, 29 insertions(+) diff --git a/cl_sii/rcv/constants.py b/cl_sii/rcv/constants.py index ce4a94e6..edbaebbe 100644 --- a/cl_sii/rcv/constants.py +++ b/cl_sii/rcv/constants.py @@ -1,4 +1,7 @@ +from __future__ import annotations + import enum +from typing import Optional from ..dte.constants import TipoDteEnum @@ -33,6 +36,19 @@ class RcvKind(enum.Enum): VENTAS = 'VENTAS' """RCV / ventas.""" + def is_estado_contable_compatible(self, value: Optional[RcEstadoContable]) -> bool: + if value is not None and not isinstance(value, RcEstadoContable): + raise TypeError("Value must be None or a 'RcEstadoContable'.") + + if self == RcvKind.COMPRAS and value is not None: + result = True + elif self == RcvKind.VENTAS and value is None: + result = True + else: + result = False + + return result + @enum.unique class RcEstadoContable(enum.Enum): diff --git 
a/tests/test_rcv_constants.py b/tests/test_rcv_constants.py index 6c810479..78a008e7 100644 --- a/tests/test_rcv_constants.py +++ b/tests/test_rcv_constants.py @@ -22,6 +22,19 @@ def test_values_type(self): {str} ) + def test_is_estado_contable_compatible(self): + self.assertTrue(RcvKind.VENTAS.is_estado_contable_compatible(None)) + self.assertTrue(RcvKind.COMPRAS.is_estado_contable_compatible(RcEstadoContable.REGISTRO)) + self.assertTrue(RcvKind.COMPRAS.is_estado_contable_compatible(RcEstadoContable.NO_INCLUIR)) + self.assertTrue(RcvKind.COMPRAS.is_estado_contable_compatible(RcEstadoContable.RECLAMADO)) + self.assertTrue(RcvKind.COMPRAS.is_estado_contable_compatible(RcEstadoContable.PENDIENTE)) + + self.assertFalse(RcvKind.COMPRAS.is_estado_contable_compatible(None)) + self.assertFalse(RcvKind.VENTAS.is_estado_contable_compatible(RcEstadoContable.REGISTRO)) + self.assertFalse(RcvKind.VENTAS.is_estado_contable_compatible(RcEstadoContable.NO_INCLUIR)) + self.assertFalse(RcvKind.VENTAS.is_estado_contable_compatible(RcEstadoContable.RECLAMADO)) + self.assertFalse(RcvKind.VENTAS.is_estado_contable_compatible(RcEstadoContable.PENDIENTE)) + class RcEstadoContableTest(unittest.TestCase): From 1fa395cbce7448f652e330737f491af55f0414b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Thu, 13 Jun 2019 15:53:34 -0400 Subject: [PATCH 13/14] HISTORY: update for new version --- HISTORY.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/HISTORY.rst b/HISTORY.rst index b6e1d6c7..7b2103c3 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -3,6 +3,15 @@ History ------- +0.7.0 (2019-06-13) ++++++++++++++++++++++++ + +* (PR #63, 2019-06-13) rcv.parse_csv: significant changes to parse functions +* (PR #62, 2019-06-13) libs: add module ``io_utils`` +* (PR #61, 2019-06-12) rcv: add data models, constants and more +* (PR #60, 2019-06-12) libs.tz_utils: misc +* (PR #59, 2019-05-31) rcv.parse_csv: add ``parse_rcv_compra_X_csv_file`` + 0.6.5 (2019-05-29) 
+++++++++++++++++++++++ From fc8f3d28a5257b9afbb6c644819d21314c6d6f0e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Germ=C3=A1n=20Larra=C3=ADn?= Date: Thu, 13 Jun 2019 15:53:57 -0400 Subject: [PATCH 14/14] =?UTF-8?q?Bump=20version:=200.6.5=20=E2=86=92=200.7?= =?UTF-8?q?.0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .bumpversion.cfg | 2 +- cl_sii/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.bumpversion.cfg b/.bumpversion.cfg index 079dba51..00af7705 100644 --- a/.bumpversion.cfg +++ b/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.6.5 +current_version = 0.7.0 commit = True tag = True diff --git a/cl_sii/__init__.py b/cl_sii/__init__.py index 35528978..ad9524d2 100644 --- a/cl_sii/__init__.py +++ b/cl_sii/__init__.py @@ -5,4 +5,4 @@ """ -__version__ = '0.6.5' +__version__ = '0.7.0'