From 56d50009ad404fb8f9ea1232383e925a1e412201 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tiziano=20M=C3=BCller?= Date: Mon, 27 Aug 2018 17:54:12 +0200 Subject: [PATCH] replace all `dict.iteritems()` with `dict.items()` --- .ci/test_daemon.py | 4 +- aiida/backends/djsite/cmdline.py | 2 +- aiida/backends/djsite/db/models.py | 22 +++---- aiida/backends/djsite/db/subtests/nodes.py | 4 +- aiida/backends/general/abstractqueries.py | 2 +- aiida/backends/sqlalchemy/models/workflow.py | 2 +- aiida/backends/sqlalchemy/utils.py | 4 +- aiida/backends/tests/__init__.py | 8 +-- aiida/backends/tests/calculation_node.py | 2 +- .../tests/cmdline/commands/test_computer.py | 4 +- aiida/backends/tests/export_and_import.py | 2 +- aiida/backends/tests/nodes.py | 20 +++--- aiida/backends/tests/orm/data/frozendict.py | 2 +- aiida/backends/tests/query.py | 8 +-- aiida/backends/tests/restapi.py | 2 +- aiida/backends/tests/work/process.py | 2 +- .../tests/work/test_process_builder.py | 2 +- aiida/backends/tests/work/work_chain.py | 12 ++-- aiida/cmdline/commands/cmd_devel.py | 2 +- aiida/cmdline/commands/cmd_export.py | 8 +-- aiida/cmdline/utils/multi_line_input.py | 2 +- aiida/common/graph.py | 8 +-- aiida/common/log.py | 2 +- aiida/common/orbital/__init__.py | 2 +- aiida/common/setup.py | 2 +- aiida/orm/data/__init__.py | 2 +- aiida/orm/data/array/bands.py | 2 +- aiida/orm/data/array/trajectory.py | 2 +- aiida/orm/data/frozendict.py | 2 +- aiida/orm/data/parameter.py | 2 +- aiida/orm/data/structure.py | 2 +- aiida/orm/data/upf.py | 2 +- aiida/orm/implementation/django/computer.py | 2 +- aiida/orm/implementation/django/group.py | 6 +- aiida/orm/implementation/django/log.py | 2 +- aiida/orm/implementation/django/workflow.py | 4 +- .../general/calculation/job/__init__.py | 6 +- aiida/orm/implementation/general/node.py | 10 +-- .../orm/implementation/sqlalchemy/computer.py | 2 +- aiida/orm/implementation/sqlalchemy/group.py | 2 +- aiida/orm/implementation/sqlalchemy/log.py | 2 +- .../orm/implementation/sqlalchemy/workflow.py | 2 +- aiida/orm/importexport.py | 64 +++++++++---------- aiida/plugins/entry.py | 6 +- aiida/plugins/loader.py | 2 +- aiida/plugins/registry.py | 2 +- aiida/restapi/common/utils.py | 4 +- aiida/restapi/translator/base.py | 8 +-- aiida/scheduler/datastructures.py | 4 +- aiida/scheduler/plugins/direct.py | 2 +- aiida/scheduler/plugins/lsf.py | 2 +- aiida/scheduler/plugins/pbsbaseclasses.py | 2 +- aiida/scheduler/plugins/sge.py | 2 +- aiida/scheduler/plugins/slurm.py | 2 +- aiida/tools/dbexporters/tcod.py | 6 +- aiida/tools/dbimporters/baseclasses.py | 2 +- aiida/tools/dbimporters/plugins/icsd.py | 4 +- aiida/transport/plugins/test_all_plugins.py | 2 +- aiida/utils/serialize.py | 12 ++-- aiida/work/job_processes.py | 6 +- aiida/work/processes.py | 8 +-- aiida/work/workchain.py | 2 +- examples/work/common.py | 2 +- examples/work/scratch.py | 2 +- 64 files changed, 164 insertions(+), 164 deletions(-) diff --git a/.ci/test_daemon.py b/.ci/test_daemon.py index 866198781a..a3ccdd3223 100644 --- a/.ci/test_daemon.py +++ b/.ci/test_daemon.py @@ -79,7 +79,7 @@ def print_logshow(pk): def validate_calculations(expected_results): valid = True actual_dict = {} - for pk, expected_dict in expected_results.iteritems(): + for pk, expected_dict in expected_results.items(): calc = load_node(pk) if not calc.is_finished_ok: print('Calculation<{}> not finished ok: process_state<{}> exit_status<{}>' @@ -110,7 +110,7 @@ def validate_calculations(expected_results): def validate_workchains(expected_results): valid = True - 
for pk, expected_value in expected_results.iteritems(): + for pk, expected_value in expected_results.items(): this_valid = True try: calc = load_node(pk) diff --git a/aiida/backends/djsite/cmdline.py b/aiida/backends/djsite/cmdline.py index 75bd3adfe8..bd85c33636 100644 --- a/aiida/backends/djsite/cmdline.py +++ b/aiida/backends/djsite/cmdline.py @@ -24,7 +24,7 @@ def get_group_list(user, type_string, n_days_ago=None, name_filters={}): from aiida.orm.implementation.django.group import Group - name_filters = {"name__" + k: v for (k, v) in name_filters.iteritems() if v} + name_filters = {"name__" + k: v for (k, v) in name_filters.items() if v} if n_days_ago: n_days_ago = timezone.now() - datetime.timedelta(days=n_days_ago) diff --git a/aiida/backends/djsite/db/models.py b/aiida/backends/djsite/db/models.py index c2030a2217..6245fa8499 100644 --- a/aiida/backends/djsite/db/models.py +++ b/aiida/backends/djsite/db/models.py @@ -375,7 +375,7 @@ def _deserialize_attribute(mainitem, subitems, sep, original_class=None, # subitems contains all subitems, here I store only those of # deepness 1, i.e. if I have subitems '0', '1' and '1.c' I # store only '0' and '1' - firstlevelsubdict = {k: v for k, v in subitems.iteritems() + firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k} # For checking, I verify the expected values @@ -427,10 +427,10 @@ def _deserialize_attribute(mainitem, subitems, sep, original_class=None, # I get the values in memory as a dictionary tempdict = {} - for firstsubk, firstsubv in firstlevelsubdict.iteritems(): + for firstsubk, firstsubv in firstlevelsubdict.items(): # I call recursively the same function to get subitems newsubitems = {k[len(firstsubk) + len(sep):]: v - for k, v in subitems.iteritems() + for k, v in subitems.items() if k.startswith(firstsubk + sep)} tempdict[firstsubk] = _deserialize_attribute(mainitem=firstsubv, subitems=newsubitems, sep=sep, original_class=original_class, @@ -443,7 +443,7 @@ def _deserialize_attribute(mainitem, subitems, sep, original_class=None, # subitems contains all subitems, here I store only those of # deepness 1, i.e. 
if I have subitems '0', '1' and '1.c' I # store only '0' and '1' - firstlevelsubdict = {k: v for k, v in subitems.iteritems() + firstlevelsubdict = {k: v for k, v in subitems.items() if sep not in k} if len(firstlevelsubdict) != mainitem['ival']: @@ -472,10 +472,10 @@ def _deserialize_attribute(mainitem, subitems, sep, original_class=None, # I get the values in memory as a dictionary tempdict = {} - for firstsubk, firstsubv in firstlevelsubdict.iteritems(): + for firstsubk, firstsubv in firstlevelsubdict.items(): # I call recursively the same function to get subitems newsubitems = {k[len(firstsubk) + len(sep):]: v - for k, v in subitems.iteritems() + for k, v in subitems.items() if k.startswith(firstsubk + sep)} tempdict[firstsubk] = _deserialize_attribute(mainitem=firstsubv, subitems=newsubitems, sep=sep, original_class=original_class, @@ -527,11 +527,11 @@ def deserialize_attributes(data, sep, original_class=None, original_pk=None): # I group results by zero-level entity found_mainitems = {} found_subitems = defaultdict(dict) - for mainkey, descriptiondict in data.iteritems(): + for mainkey, descriptiondict in data.items(): prefix, thissep, postfix = mainkey.partition(sep) if thissep: found_subitems[prefix][postfix] = {k: v for k, v - in descriptiondict.iteritems() if k != "key"} + in descriptiondict.items() if k != "key"} else: mainitem = descriptiondict.copy() mainitem['key'] = prefix @@ -546,7 +546,7 @@ def deserialize_attributes(data, sep, original_class=None, original_pk=None): # For each zero-level entity, I call the _deserialize_attribute function retval = {} - for k, v in found_mainitems.iteritems(): + for k, v in found_mainitems.items(): # Note: found_subitems[k] will return an empty dictionary it the # key does not exist, as it is a defaultdict retval[k] = _deserialize_attribute(mainitem=v, @@ -835,7 +835,7 @@ def create_value(cls, key, value, subspecifier_value=None, new_entry.ival = len(value) new_entry.fval = None - for subk, subv in value.iteritems(): + for subk, subv in value.items(): cls.validate_key(subk) # I do not need get_or_create here, because @@ -1134,7 +1134,7 @@ def reset_values_for_node(cls, dbnode, attributes, with_transaction=True, dbnode_node = dbnode # create_value returns a list of nodes to store - for k, v in attributes.iteritems(): + for k, v in attributes.items(): nodes_to_store.extend( cls.create_value(k, v, subspecifier_value=dbnode_node, diff --git a/aiida/backends/djsite/db/subtests/nodes.py b/aiida/backends/djsite/db/subtests/nodes.py index 5936e61f23..0d0db747f3 100644 --- a/aiida/backends/djsite/db/subtests/nodes.py +++ b/aiida/backends/djsite/db/subtests/nodes.py @@ -260,10 +260,10 @@ def test_replace_extras_2(self): 'list': 66.3, } - for k, v in extras_to_set.iteritems(): + for k, v in extras_to_set.items(): a.set_extra(k, v) - for k, v in new_extras.iteritems(): + for k, v in new_extras.items(): # I delete one by one the keys and check if the operation is # performed correctly a.set_extra(k, v) diff --git a/aiida/backends/general/abstractqueries.py b/aiida/backends/general/abstractqueries.py index b5cfd4e3c3..cb15692092 100644 --- a/aiida/backends/general/abstractqueries.py +++ b/aiida/backends/general/abstractqueries.py @@ -142,7 +142,7 @@ def count_statistics(dataset): def get_statistics_dict(dataset): results = {} for count, typestring in sorted( - (v, k) for k, v in dataset.iteritems())[::-1]: + (v, k) for k, v in dataset.items())[::-1]: results[typestring] = count return results diff --git a/aiida/backends/sqlalchemy/models/workflow.py 
b/aiida/backends/sqlalchemy/models/workflow.py index e26103da7a..d8f36d66c5 100644 --- a/aiida/backends/sqlalchemy/models/workflow.py +++ b/aiida/backends/sqlalchemy/models/workflow.py @@ -236,7 +236,7 @@ def get_or_create(self, **kwargs): # this is to emulate the django method if instance: return instance, False else: - params = dict((k, v) for k, v in kwargs.iteritems() if not isinstance(v, ClauseElement)) + params = dict((k, v) for k, v in kwargs.items() if not isinstance(v, ClauseElement)) instance = model(**params) session.add(instance) return instance, True diff --git a/aiida/backends/sqlalchemy/utils.py b/aiida/backends/sqlalchemy/utils.py index 3c95da9694..5dc4dd4437 100644 --- a/aiida/backends/sqlalchemy/utils.py +++ b/aiida/backends/sqlalchemy/utils.py @@ -110,7 +110,7 @@ def f(v): if isinstance(v, list): return [f(_) for _ in v] elif isinstance(v, dict): - return dict((key, f(val)) for key, val in v.iteritems()) + return dict((key, f(val)) for key, val in v.items()) elif isinstance(v, datetime.datetime): return v.isoformat() return v @@ -134,7 +134,7 @@ def f(d): d[i] = f(val) return d elif isinstance(d, dict): - for k, v in d.iteritems(): + for k, v in d.items(): d[k] = f(v) return d elif isinstance(d, basestring): diff --git a/aiida/backends/tests/__init__.py b/aiida/backends/tests/__init__.py index 46a25b99aa..ffb506738a 100644 --- a/aiida/backends/tests/__init__.py +++ b/aiida/backends/tests/__init__.py @@ -162,10 +162,10 @@ def get_db_test_list(): raise ConfigurationError("A 'common' key must always be defined!") retdict = defaultdict(list) - for k, tests in common_tests.iteritems(): + for k, tests in common_tests.items(): for t in tests: retdict[k].append(t) - for k, tests in be_tests.iteritems(): + for k, tests in be_tests.items(): for t in tests: retdict[k].append(t) @@ -177,9 +177,9 @@ def get_db_test_list(): # Explode the dictionary so that if I have a.b.c, # I can run it also just with 'a' or with 'a.b' final_retdict = defaultdict(list) - for k, v in retdict.iteritems(): + for k, v in retdict.items(): final_retdict[k] = v - for k, v in retdict.iteritems(): + for k, v in retdict.items(): if '.' in k: parts = k.split('.') for last_idx in range(1, len(parts)): diff --git a/aiida/backends/tests/calculation_node.py b/aiida/backends/tests/calculation_node.py index aed246db2d..9454eeda40 100644 --- a/aiida/backends/tests/calculation_node.py +++ b/aiida/backends/tests/calculation_node.py @@ -65,7 +65,7 @@ def test_calculation_updatable_attribute(self): 'state': self.stateval } - for k, v in attrs_to_set.iteritems(): + for k, v in attrs_to_set.items(): a._set_attr(k, v) # Check before storing diff --git a/aiida/backends/tests/cmdline/commands/test_computer.py b/aiida/backends/tests/cmdline/commands/test_computer.py index 893f666051..71e736b18b 100644 --- a/aiida/backends/tests/cmdline/commands/test_computer.py +++ b/aiida/backends/tests/cmdline/commands/test_computer.py @@ -61,7 +61,7 @@ def generate_setup_options(ordereddict): :return: a list to be passed as command-line arguments. """ options = [] - for key, value in ordereddict.iteritems(): + for key, value in ordereddict.items(): if value is None: options.append('--{}'.format(key)) else: @@ -81,7 +81,7 @@ def generate_setup_options_interactive(ordereddict): :return: a list to be passed as command-line arguments. 
""" options = [] - for key, value in ordereddict.iteritems(): + for key, value in ordereddict.items(): if value is None: options.append(True) else: diff --git a/aiida/backends/tests/export_and_import.py b/aiida/backends/tests/export_and_import.py index e09b4a8f21..f79df82138 100644 --- a/aiida/backends/tests/export_and_import.py +++ b/aiida/backends/tests/export_and_import.py @@ -974,7 +974,7 @@ def test_complex_graph_import_export(self): import_data(filename, silent=True, ignore_unknown_nodes=True) - for uuid, label in node_uuids_labels.iteritems(): + for uuid, label in node_uuids_labels.items(): try: load_node(uuid) except NotExistent: diff --git a/aiida/backends/tests/nodes.py b/aiida/backends/tests/nodes.py index 5dba47ee33..c085bb221d 100644 --- a/aiida/backends/tests/nodes.py +++ b/aiida/backends/tests/nodes.py @@ -607,7 +607,7 @@ def test_attributes_on_clone(self): 'emptylist': [], } - for k, v in attrs_to_set.iteritems(): + for k, v in attrs_to_set.items(): a._set_attr(k, v) # Create a copy @@ -1020,7 +1020,7 @@ def test_attr_listing(self): 'list': self.listval, } - for k, v in attrs_to_set.iteritems(): + for k, v in attrs_to_set.items(): a._set_attr(k, v) a.store() @@ -1031,7 +1031,7 @@ def test_attr_listing(self): 'some_other_name': 987 } - for k, v in extras_to_set.iteritems(): + for k, v in extras_to_set.items(): a.set_extra(k, v) all_extras = dict(_aiida_hash=AnyValue(), **extras_to_set) @@ -1059,14 +1059,14 @@ def test_versioning(self): 'list': self.listval, } - for key, value in attrs_to_set.iteritems(): + for key, value in attrs_to_set.items(): a._set_attr(key, value) self.assertEquals(a.get_attr(key), value) a.store() # Check after storing - for key, value in attrs_to_set.iteritems(): + for key, value in attrs_to_set.items(): self.assertEquals(a.get_attr(key), value) # Even if I stored many attributes, this should stay at 1 @@ -1102,7 +1102,7 @@ def test_delete_extras(self): all_extras = dict(_aiida_hash=AnyValue(), **extras_to_set) - for k, v in extras_to_set.iteritems(): + for k, v in extras_to_set.items(): a.set_extra(k, v) self.assertEquals({k: v for k, v in a.iterextras()}, all_extras) @@ -1154,12 +1154,12 @@ def test_replace_extras_1(self): 'list': 66.3, } - for k, v in extras_to_set.iteritems(): + for k, v in extras_to_set.items(): a.set_extra(k, v) self.assertEquals({k: v for k, v in a.iterextras()}, all_extras) - for k, v in new_extras.iteritems(): + for k, v in new_extras.items(): # I delete one by one the keys and check if the operation is # performed correctly a.set_extra(k, v) @@ -1967,11 +1967,11 @@ def test_link_with_unstored(self): n2.store_all() n3.store_all() - n2_in_links = [(l, n.uuid) for l, n in n2.get_inputs_dict().iteritems()] + n2_in_links = [(l, n.uuid) for l, n in n2.get_inputs_dict().items()] self.assertEquals(sorted(n2_in_links), sorted([ ('l1', n1.uuid), ])) - n3_in_links = [(l, n.uuid) for l, n in n3.get_inputs_dict().iteritems()] + n3_in_links = [(l, n.uuid) for l, n in n3.get_inputs_dict().items()] self.assertEquals( sorted(n3_in_links), sorted([ ('l2', n2.uuid), diff --git a/aiida/backends/tests/orm/data/frozendict.py b/aiida/backends/tests/orm/data/frozendict.py index 1ad8c8c421..716624dff1 100644 --- a/aiida/backends/tests/orm/data/frozendict.py +++ b/aiida/backends/tests/orm/data/frozendict.py @@ -30,7 +30,7 @@ def test_get_value(self): def test_iterate(self): input = {'a': Int(5).store(), 'b': Str('testing').store()} d = FrozenDict(dict=input) - for k, v in d.iteritems(): + for k, v in d.items(): self.assertEqual(input[k], v) def 
test_length(self): diff --git a/aiida/backends/tests/query.py b/aiida/backends/tests/query.py index dc8be96313..2bbdefb2f2 100644 --- a/aiida/backends/tests/query.py +++ b/aiida/backends/tests/query.py @@ -981,10 +981,10 @@ def store_and_add(n, statistics): new_db_statistics = qmanager.get_creation_statistics() # I only check a few fields - new_db_statistics = {k: v for k, v in new_db_statistics.iteritems() if k in expected_db_statistics} + new_db_statistics = {k: v for k, v in new_db_statistics.items() if k in expected_db_statistics} expected_db_statistics = {k: dict(v) if isinstance(v, defaultdict) else v - for k, v in expected_db_statistics.iteritems()} + for k, v in expected_db_statistics.items()} self.assertEquals(new_db_statistics, expected_db_statistics) @@ -1030,9 +1030,9 @@ class QueryManagerDefault(AbstractQueryManager): new_db_statistics = qmanager_default.get_creation_statistics() # I only check a few fields - new_db_statistics = {k: v for k, v in new_db_statistics.iteritems() if k in expected_db_statistics} + new_db_statistics = {k: v for k, v in new_db_statistics.items() if k in expected_db_statistics} expected_db_statistics = {k: dict(v) if isinstance(v, defaultdict) else v - for k, v in expected_db_statistics.iteritems()} + for k, v in expected_db_statistics.items()} self.assertEquals(new_db_statistics, expected_db_statistics) diff --git a/aiida/backends/tests/restapi.py b/aiida/backends/tests/restapi.py index 53900d9ac8..48606bbe49 100644 --- a/aiida/backends/tests/restapi.py +++ b/aiida/backends/tests/restapi.py @@ -172,7 +172,7 @@ def process_dummy_data(cls): 'structuredata': StructureData, 'data': Data, } - for label, dataclass in data_types.iteritems(): + for label, dataclass in data_types.items(): data = QueryBuilder().append(dataclass, tag="data", project=data_projections).order_by( {'data': [{'id': {'order': 'desc'}}]}).dict() data = [_['data'] for _ in data] diff --git a/aiida/backends/tests/work/process.py b/aiida/backends/tests/work/process.py index 2fe464f36f..28cc1d8967 100644 --- a/aiida/backends/tests/work/process.py +++ b/aiida/backends/tests/work/process.py @@ -105,7 +105,7 @@ def test_input_link_creation(self): inputs['store_provenance'] = True p = test_utils.DummyProcess(inputs) - for label, value in p._calc.get_inputs_dict().iteritems(): + for label, value in p._calc.get_inputs_dict().items(): self.assertTrue(label in inputs) self.assertEqual(int(label), int(value.value)) dummy_inputs.remove(label) diff --git a/aiida/backends/tests/work/test_process_builder.py b/aiida/backends/tests/work/test_process_builder.py index a7a1e5818c..21d4b478ac 100644 --- a/aiida/backends/tests/work/test_process_builder.py +++ b/aiida/backends/tests/work/test_process_builder.py @@ -47,7 +47,7 @@ def test_process_builder_attributes(self): """ Check that the builder has all the input ports of the process class as attributes """ - for name, port in self.process_class.spec().inputs.iteritems(): + for name, port in self.process_class.spec().inputs.items(): self.assertTrue(hasattr(self.builder, name)) def test_process_builder_set_attributes(self): diff --git a/aiida/backends/tests/work/work_chain.py b/aiida/backends/tests/work/work_chain.py index b9c3b07191..cd5645f450 100644 --- a/aiida/backends/tests/work/work_chain.py +++ b/aiida/backends/tests/work/work_chain.py @@ -276,7 +276,7 @@ def test_run(self): # Try the if(..) 
part work.run(Wf, value=A, n=three) # Check the steps that should have been run - for step, finished in Wf.finished_steps.iteritems(): + for step, finished in Wf.finished_steps.items(): if step not in ['s3', 's4', 'isB']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) @@ -284,7 +284,7 @@ def test_run(self): # Try the elif(..) part finished_steps = work.run(Wf, value=B, n=three) # Check the steps that should have been run - for step, finished in finished_steps.iteritems(): + for step, finished in finished_steps.items(): if step not in ['isA', 's2', 's4']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) @@ -292,7 +292,7 @@ def test_run(self): # Try the else... part finished_steps = work.run(Wf, value=C, n=three) # Check the steps that should have been run - for step, finished in finished_steps.iteritems(): + for step, finished in finished_steps.items(): if step not in ['isA', 's2', 'isB', 's3']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) @@ -388,7 +388,7 @@ def test_checkpointing(self): # Try the if(..) part finished_steps = self._run_with_checkpoints(Wf, inputs={'value': A, 'n': three}) # Check the steps that should have been run - for step, finished in finished_steps.iteritems(): + for step, finished in finished_steps.items(): if step not in ['s3', 's4', 'isB']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) @@ -396,7 +396,7 @@ def test_checkpointing(self): # Try the elif(..) part finished_steps = self._run_with_checkpoints(Wf, inputs={'value': B, 'n': three}) # Check the steps that should have been run - for step, finished in finished_steps.iteritems(): + for step, finished in finished_steps.items(): if step not in ['isA', 's2', 's4']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) @@ -404,7 +404,7 @@ def test_checkpointing(self): # Try the else... part finished_steps = self._run_with_checkpoints(Wf, inputs={'value': C, 'n': three}) # Check the steps that should have been run - for step, finished in finished_steps.iteritems(): + for step, finished in finished_steps.items(): if step not in ['isA', 's2', 'isB', 's3']: self.assertTrue( finished, "Step {} was not called by workflow".format(step)) diff --git a/aiida/cmdline/commands/cmd_devel.py b/aiida/cmdline/commands/cmd_devel.py index daf4a3b884..690cc3ec2a 100644 --- a/aiida/cmdline/commands/cmd_devel.py +++ b/aiida/cmdline/commands/cmd_devel.py @@ -207,7 +207,7 @@ def devel_tests(paths, verbose): # pylint: disable=too-many-locals,too-many-sta else: # Without arguments, run all tests do_db = True - for key, value in get_valid_test_paths().iteritems(): + for key, value in get_valid_test_paths().items(): if value is None: # Non-db tests test_folders.append(key) diff --git a/aiida/cmdline/commands/cmd_export.py b/aiida/cmdline/commands/cmd_export.py index 5ef6f6009d..ebc09d1395 100644 --- a/aiida/cmdline/commands/cmd_export.py +++ b/aiida/cmdline/commands/cmd_export.py @@ -261,7 +261,7 @@ def replace_requires(data): """Replace the requires keys with new module path.""" if isinstance(data, dict): new_data = {} - for key, value in data.iteritems(): + for key, value in data.items(): if key == 'requires' and value.startswith(old_start): new_data[key] = get_new_string(value) else: @@ -382,7 +382,7 @@ class NodeType(enum.Enum): # pylint: disable=too-few-public-methods # Now we migrate the entity key names i.e. 
removing the 'aiida.backends.djsite.db.models' prefix for field in ['unique_identifiers', 'all_fields_info']: - for old_key, new_key in entity_map.iteritems(): + for old_key, new_key in entity_map.items(): if old_key in metadata[field]: metadata[field][new_key] = metadata[field][old_key] del metadata[field][old_key] @@ -390,13 +390,13 @@ class NodeType(enum.Enum): # pylint: disable=too-few-public-methods # Replace the 'requires' keys in the nested dictionaries in 'all_fields_info' for entity in metadata['all_fields_info'].values(): for prop in entity.values(): - for key, value in prop.iteritems(): + for key, value in prop.items(): if key == 'requires' and value in entity_map: prop[key] = entity_map[value] # Replace any present keys in the data.json for field in ['export_data']: - for old_key, new_key in entity_map.iteritems(): + for old_key, new_key in entity_map.items(): if old_key in data[field]: data[field][new_key] = data[field][old_key] del data[field][old_key] diff --git a/aiida/cmdline/utils/multi_line_input.py b/aiida/cmdline/utils/multi_line_input.py index d484b9b58a..ffa94c7ba1 100644 --- a/aiida/cmdline/utils/multi_line_input.py +++ b/aiida/cmdline/utils/multi_line_input.py @@ -30,7 +30,7 @@ def edit_pre_post(pre=None, post=None, summary=None): from aiida.cmdline.utils.templates import env template = env.get_template('prepost.bash.tpl') summary = summary or {} - summary = {k: v for k, v in summary.iteritems() if v} + summary = {k: v for k, v in summary.items() if v} content = template.render(default_pre=pre or '', default_post=post or '', summary=summary) mlinput = click.edit(content, extension='.bash') if mlinput: diff --git a/aiida/common/graph.py b/aiida/common/graph.py index 2b1ff98bb4..8cb5c9131c 100644 --- a/aiida/common/graph.py +++ b/aiida/common/graph.py @@ -52,7 +52,7 @@ def draw_node_settings(node, **kwargs): shape = "shape=ellipse" if kwargs: additional_params = ",{}".format( - ",".join('{}="{}"'.format(k, v) for k, v in kwargs.iteritems())) + ",".join('{}="{}"'.format(k, v) for k, v in kwargs.items())) else: additional_params = "" if node.label: @@ -176,11 +176,11 @@ def draw_link_settings(inp_id, out_id, link_label, link_type): fd, fname = tempfile.mkstemp(suffix='.dot') with open(fname, 'w') as fout: fout.write("digraph G {\n") - for l_name, l_values in links.iteritems(): + for l_name, l_values in links.items(): fout.write(' {}\n'.format(l_values)) - for n_name, n_values in nodes.iteritems(): + for n_name, n_values in nodes.items(): fout.write(" {}\n".format(n_values)) - for n_name, n_values in additional_nodes.iteritems(): + for n_name, n_values in additional_nodes.items(): fout.write(" {}\n".format(n_values)) fout.write("}\n") diff --git a/aiida/common/log.py b/aiida/common/log.py index ff2d17f9e0..b1946fd7dd 100644 --- a/aiida/common/log.py +++ b/aiida/common/log.py @@ -182,7 +182,7 @@ def configure_logging(daemon=False, daemon_log_file=None): 'maxBytes': 100000, } - for name, logger in config.get('loggers', {}).iteritems(): + for name, logger in config.get('loggers', {}).items(): logger.setdefault('handlers', []).append(daemon_handler_name) logging.config.dictConfig(config) diff --git a/aiida/common/orbital/__init__.py b/aiida/common/orbital/__init__.py index f3505fc300..06258b14c5 100644 --- a/aiida/common/orbital/__init__.py +++ b/aiida/common/orbital/__init__.py @@ -139,7 +139,7 @@ def set_orbital_dict(self, init_dict): # Adds the module_name in hard-coded manner init_dict.update({"module_name": self._get_module_name()}) validated_dict = 
self._validate_keys(init_dict) - for k, v in validated_dict.iteritems(): + for k, v in validated_dict.items(): self._orbital_dict[k] = v def get_orbital_dict(self): diff --git a/aiida/common/setup.py b/aiida/common/setup.py index 8972dac3fd..28fde76ac1 100644 --- a/aiida/common/setup.py +++ b/aiida/common/setup.py @@ -525,7 +525,7 @@ def create_configuration(profile='default'): if this_existing_confs: print("The following configuration found corresponding to " "profile {}.".format(profile)) - for k, v in this_existing_confs.iteritems(): + for k, v in this_existing_confs.items(): if k in key_explanation: print("{}: {}".format(key_explanation.get(k), v)) else: diff --git a/aiida/orm/data/__init__.py b/aiida/orm/data/__init__.py index a8167af971..bd017b394f 100644 --- a/aiida/orm/data/__init__.py +++ b/aiida/orm/data/__init__.py @@ -258,7 +258,7 @@ def export(self, path, fileformat=None, overwrite=False, **kwargs): raise OSError("The file {} already exists, stopping.".format( path)) - for additional_fname, additional_fcontent in extra_files.iteritems(): + for additional_fname, additional_fcontent in extra_files.items(): retlist.append(additional_fname) with open(additional_fname, 'wb') as f: f.write(additional_fcontent) #.encode('utf-8')) # This is up to each specific plugin diff --git a/aiida/orm/data/array/bands.py b/aiida/orm/data/array/bands.py index 005948e812..ec324c8acc 100644 --- a/aiida/orm/data/array/bands.py +++ b/aiida/orm/data/array/bands.py @@ -841,7 +841,7 @@ def _matplotlib_get_dict(self, main_file_name="", comments=True, title="", legen all_data['y_max_lim'] = y_max_lim #all_data['ytick_spacing'] = ytick_spacing - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): if k not in valid_additional_keywords: raise TypeError("_matplotlib_get_dict() got an unexpected keyword argument '{}'".format( k diff --git a/aiida/orm/data/array/trajectory.py b/aiida/orm/data/array/trajectory.py index 098e119520..641a78e612 100644 --- a/aiida/orm/data/array/trajectory.py +++ b/aiida/orm/data/array/trajectory.py @@ -437,7 +437,7 @@ def _prepare_xsf(self, index=None, main_file_name=""): Write the given trajectory to a string of format XSF (for XCrySDen). """ from aiida.common.constants import elements - _atomic_numbers = {data['symbol']: num for num, data in elements.iteritems()} + _atomic_numbers = {data['symbol']: num for num, data in elements.items()} indices = list(range(self.numsteps)) if index is not None: diff --git a/aiida/orm/data/frozendict.py b/aiida/orm/data/frozendict.py index f1398a1f86..4e8f8d897c 100644 --- a/aiida/orm/data/frozendict.py +++ b/aiida/orm/data/frozendict.py @@ -34,7 +34,7 @@ def set_dict(self, dict): assert isinstance(value, Data) assert value.is_stored - for k, v in dict.iteritems(): + for k, v in dict.items(): self._set_attr(k, v.pk) def __getitem__(self, key): diff --git a/aiida/orm/data/parameter.py b/aiida/orm/data/parameter.py index c624461c14..c22f0e056e 100644 --- a/aiida/orm/data/parameter.py +++ b/aiida/orm/data/parameter.py @@ -57,7 +57,7 @@ def update_dict(self, dict): :param dict: a dictionary with the keys to substitute. It works like dict.update(), adding new keys and overwriting existing keys. 
""" - for k, v in dict.iteritems(): + for k, v in dict.items(): self._set_attr(k, v) def get_dict(self): diff --git a/aiida/orm/data/structure.py b/aiida/orm/data/structure.py index 27ddcba470..5485d30a88 100644 --- a/aiida/orm/data/structure.py +++ b/aiida/orm/data/structure.py @@ -39,7 +39,7 @@ _valid_symbols = tuple(i['symbol'] for i in elements.values()) _atomic_masses = {el['symbol']: el['mass'] for el in elements.values()} -_atomic_numbers = {data['symbol']: num for num, data in elements.iteritems()} +_atomic_numbers = {data['symbol']: num for num, data in elements.items()} def _get_valid_cell(inputcell): diff --git a/aiida/orm/data/upf.py b/aiida/orm/data/upf.py index e49ba1eec9..925e8ae024 100644 --- a/aiida/orm/data/upf.py +++ b/aiida/orm/data/upf.py @@ -100,7 +100,7 @@ def get_pseudos_dict(structure, family_name): # Will contain a list of all species of the pseudo with given PK pseudo_species = defaultdict(list) - for kindname, pseudo in kind_pseudo_dict.iteritems(): + for kindname, pseudo in kind_pseudo_dict.items(): pseudo_dict[pseudo.pk] = pseudo pseudo_species[pseudo.pk].append(kindname) diff --git a/aiida/orm/implementation/django/computer.py b/aiida/orm/implementation/django/computer.py index c5a9327a7d..aecb957269 100644 --- a/aiida/orm/implementation/django/computer.py +++ b/aiida/orm/implementation/django/computer.py @@ -65,7 +65,7 @@ def __init__(self, **kwargs): self.set(**kwargs) def set(self, **kwargs): - for k, v in kwargs.iteritems(): + for k, v in kwargs.items(): try: method = getattr(self, 'set_{}'.format(k)) except AttributeError: diff --git a/aiida/orm/implementation/django/group.py b/aiida/orm/implementation/django/group.py index 70928af1cb..91d51e74c4 100644 --- a/aiida/orm/implementation/django/group.py +++ b/aiida/orm/implementation/django/group.py @@ -280,14 +280,14 @@ def query(cls, name=None, type_string="", pk=None, uuid=None, nodes=None, if name_filters is not None: name_filters_list = {"name__" + k: v for (k, v) - in name_filters.iteritems() if v} + in name_filters.items() if v} queryobject &= Q(**name_filters_list) groups_pk = set(DbGroup.objects.filter( queryobject, **kwargs).values_list('pk', flat=True)) if node_attributes is not None: - for k, vlist in node_attributes.iteritems(): + for k, vlist in node_attributes.items(): if isinstance(vlist, basestring) or not isinstance( vlist, collections.Iterable): vlist = [vlist] @@ -300,7 +300,7 @@ def query(cls, name=None, type_string="", pk=None, uuid=None, nodes=None, # prepend to the key the right django string to SQL-join # on the right table query_dict = {'dbnodes__dbattributes__{}'.format(k2): v2 - for k2, v2 in base_query_dict.iteritems()} + for k2, v2 in base_query_dict.items()} # I narrow down the list of groups. 
# I had to do it in this way, with multiple DB hits and diff --git a/aiida/orm/implementation/django/log.py b/aiida/orm/implementation/django/log.py index c299aa6803..6f7ce55174 100644 --- a/aiida/orm/implementation/django/log.py +++ b/aiida/orm/implementation/django/log.py @@ -50,7 +50,7 @@ def find(self, filter_by=None, order_by=None, limit=None): filter_by = {} # Map the Log property names to DbLog field names - for key, value in filter_by.iteritems(): + for key, value in filter_by.items(): filters[key] = value if not order_by: diff --git a/aiida/orm/implementation/django/workflow.py b/aiida/orm/implementation/django/workflow.py index 9f13623559..5b9a995160 100644 --- a/aiida/orm/implementation/django/workflow.py +++ b/aiida/orm/implementation/django/workflow.py @@ -313,7 +313,7 @@ def set_params(self, params, force=False): def par_validate(params): the_params = {} - for k, v in params.iteritems(): + for k, v in params.items(): if any([isinstance(v, int), isinstance(v, bool), isinstance(v, float), @@ -545,7 +545,7 @@ def get_subclass_from_dbnode(cls, wf_db): except ImportError: raise InternalError("Unable to load the workflow module {}".format(module)) - for elem_name, elem in wf_mod.__dict__.iteritems(): + for elem_name, elem in wf_mod.__dict__.items(): if module_class == elem_name: # and issubclass(elem, Workflow): return getattr(wf_mod, elem_name)(uuid=wf_db.uuid) diff --git a/aiida/orm/implementation/general/calculation/job/__init__.py b/aiida/orm/implementation/general/calculation/job/__init__.py index 98013ab178..cc616558a0 100644 --- a/aiida/orm/implementation/general/calculation/job/__init__.py +++ b/aiida/orm/implementation/general/calculation/job/__init__.py @@ -325,7 +325,7 @@ def set_environment_variables(self, env_vars_dict): raise ValueError("You have to pass a " "dictionary to set_environment_variables") - for k, v in env_vars_dict.iteritems(): + for k, v in env_vars_dict.items(): if not isinstance(k, basestring) or not isinstance(v, basestring): raise ValueError( "Both the keys and the values of the " @@ -1075,7 +1075,7 @@ def _list_calculations( for k, v in [cls.projection_map[p]]: projections_dict[k].append(v) - for k, v in projections_dict.iteritems(): + for k, v in projections_dict.items(): qb.add_projection(k, v) # ORDER @@ -1560,7 +1560,7 @@ def _presubmit(self, folder, use_unstored_links=False): subst_dict = {'tot_num_mpiprocs': job_tmpl.job_resource.get_tot_num_mpiprocs()} - for k, v in job_tmpl.job_resource.iteritems(): + for k, v in job_tmpl.job_resource.items(): subst_dict[k] = v mpi_args = [arg.format(**subst_dict) for arg in computer.get_mpirun_command()] diff --git a/aiida/orm/implementation/general/node.py b/aiida/orm/implementation/general/node.py index 8d8b86f1e1..2b8147612f 100644 --- a/aiida/orm/implementation/general/node.py +++ b/aiida/orm/implementation/general/node.py @@ -56,7 +56,7 @@ def clean_value(value): return value.value elif isinstance(value, dict): # Check dictionary before iterables - return {k: clean_value(v) for k, v in value.iteritems()} + return {k: clean_value(v) for k, v in value.items()} elif (isinstance(value, collections.Iterable) and not isinstance(value, (str, unicode))): # list, tuple, ... 
but not a string @@ -439,7 +439,7 @@ def _set_internal(self, arguments, allow_hidden=False): raise ValueError("Cannot set {} at the same time".format( " and ".join(incomp))) - for k, v in arguments.iteritems(): + for k, v in arguments.items(): try: if allow_hidden and k.startswith("_"): method = getattr(self, '_set_{}'.format(k[1:])) @@ -814,7 +814,7 @@ def get_inputs(self, node_type=None, also_labels=False, only_in_db=False, link_t # Needed for the check input_list_keys = [i[0] for i in inputs_list] - for label, v in self._inputlinks_cache.iteritems(): + for label, v in self._inputlinks_cache.items(): src = v[0] input_link_type = v[1] if label in input_list_keys: @@ -1128,7 +1128,7 @@ def set_extras(self, the_dict): """ try: - for key, value in the_dict.iteritems(): + for key, value in the_dict.items(): self.set_extra(key, value) except AttributeError: raise AttributeError("set_extras takes a dictionary as argument") @@ -1274,7 +1274,7 @@ def iterattrs(self): # TODO: check what happens if someone stores the object while # the iterator is being used! if self._to_be_stored: - for k, v in self._attrs_cache.iteritems(): + for k, v in self._attrs_cache.items(): yield (k, v) else: for k, v in self._db_iterattrs(): diff --git a/aiida/orm/implementation/sqlalchemy/computer.py b/aiida/orm/implementation/sqlalchemy/computer.py index 7235face58..f15d078afe 100644 --- a/aiida/orm/implementation/sqlalchemy/computer.py +++ b/aiida/orm/implementation/sqlalchemy/computer.py @@ -74,7 +74,7 @@ def set(self, **kwargs): is_modified = False - for key, val in kwargs.iteritems(): + for key, val in kwargs.items(): if hasattr(self._dbcomputer, key): setattr(self._dbcomputer, key, val) else: diff --git a/aiida/orm/implementation/sqlalchemy/group.py b/aiida/orm/implementation/sqlalchemy/group.py index 92c0437aa2..d5f095f1ad 100644 --- a/aiida/orm/implementation/sqlalchemy/group.py +++ b/aiida/orm/implementation/sqlalchemy/group.py @@ -317,7 +317,7 @@ def query(cls, name=None, type_string="", pk=None, uuid=None, nodes=None, filters.append(DbGroup.user == user.dbuser) if name_filters: - for (k, v) in name_filters.iteritems(): + for (k, v) in name_filters.items(): if not v: continue if k == "startswith": diff --git a/aiida/orm/implementation/sqlalchemy/log.py b/aiida/orm/implementation/sqlalchemy/log.py index 589b622b89..e4886e6d8b 100644 --- a/aiida/orm/implementation/sqlalchemy/log.py +++ b/aiida/orm/implementation/sqlalchemy/log.py @@ -52,7 +52,7 @@ def find(self, filter_by=None, order_by=None, limit=None): filter_by = {} # Map the Log property names to DbLog field names - for key, value in filter_by.iteritems(): + for key, value in filter_by.items(): filters[key] = value columns = {} diff --git a/aiida/orm/implementation/sqlalchemy/workflow.py b/aiida/orm/implementation/sqlalchemy/workflow.py index 469cfbc1f1..737848c990 100644 --- a/aiida/orm/implementation/sqlalchemy/workflow.py +++ b/aiida/orm/implementation/sqlalchemy/workflow.py @@ -294,7 +294,7 @@ def set_params(self, params, force=False): def par_validate(params): the_params = {} - for k, v in params.iteritems(): + for k, v in params.items(): if any([isinstance(v, int), isinstance(v, bool), isinstance(v, float), diff --git a/aiida/orm/importexport.py b/aiida/orm/importexport.py index 8f57065ce4..c797fcfb93 100644 --- a/aiida/orm/importexport.py +++ b/aiida/orm/importexport.py @@ -247,7 +247,7 @@ def deserialize_attributes(attributes_data, conversion_data): if isinstance(attributes_data, dict): ret_data = {} - for k, v in attributes_data.iteritems(): + for 
k, v in attributes_data.items(): # print("k: ", k, " v: ", v) if conversion_data is not None: ret_data[k] = deserialize_attributes(v, conversion_data[k]) @@ -481,12 +481,12 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, # CREATE IMPORT DATA DIRECT UNIQUE_FIELD MAPPINGS # ################################################### import_unique_ids_mappings = {} - for model_name, import_data in data['export_data'].iteritems(): + for model_name, import_data in data['export_data'].items(): if model_name in metadata['unique_identifiers']: # I have to reconvert the pk to integer import_unique_ids_mappings[model_name] = { int(k): v[metadata['unique_identifiers'][model_name]] for k, v in - import_data.iteritems()} + import_data.items()} ############### # IMPORT DATA # @@ -523,8 +523,8 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, import_unique_ids})} foreign_ids_reverse_mappings[model_name] = { - k: v.pk for k, v in relevant_db_entries.iteritems()} - for k, v in data['export_data'][model_name].iteritems(): + k: v.pk for k, v in relevant_db_entries.items()} + for k, v in data['export_data'][model_name].items(): if v[unique_identifier] in relevant_db_entries.keys(): # Already in DB existing_entries[model_name][k] = v @@ -543,7 +543,7 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, unique_identifier = metadata['unique_identifiers'].get( model_name, None) - for import_entry_id, entry_data in existing_entries[model_name].iteritems(): + for import_entry_id, entry_data in existing_entries[model_name].items(): unique_id = entry_data[unique_identifier] existing_entry_id = foreign_ids_reverse_mappings[model_name][unique_id] # TODO COMPARE, AND COMPARE ATTRIBUTES @@ -565,13 +565,13 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, import_entry_ids = {} dupl_counter = 0 imported_comp_names = set() - for import_entry_id, entry_data in new_entries[model_name].iteritems(): + for import_entry_id, entry_data in new_entries[model_name].items(): unique_id = entry_data[unique_identifier] import_data = dict(deserialize_field( k, v, fields_info=fields_info, import_unique_ids_mappings=import_unique_ids_mappings, foreign_ids_reverse_mappings=foreign_ids_reverse_mappings) - for k, v in entry_data.iteritems()) + for k, v in entry_data.items()) if Model is models.DbComputer: # Check if there is already a computer with the same @@ -642,7 +642,7 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, print("SETTING THE IMPORTED STATES FOR NEW NODES...") # I set for all nodes, even if I should set it only # for calculations - for unique_id, new_pk in just_saved.iteritems(): + for unique_id, new_pk in just_saved.items(): imported_states.append( models.DbCalcState(dbnode_id=new_pk, state=calc_states.IMPORTED)) @@ -650,7 +650,7 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, # Now I have the PKs, print the info # Moreover, set the foreing_ids_reverse_mappings - for unique_id, new_pk in just_saved.iteritems(): + for unique_id, new_pk in just_saved.items(): import_entry_id = import_entry_ids[unique_id] foreign_ids_reverse_mappings[model_name][unique_id] = new_pk if model_name not in ret_dict: @@ -667,7 +667,7 @@ def import_data_dj(in_path, ignore_unknown_nodes=False, if model_name == NODE_ENTITY_NAME: if not silent: print("STORING NEW NODE ATTRIBUTES...") - for unique_id, new_pk in just_saved.iteritems(): + for unique_id, new_pk in just_saved.items(): import_entry_id = import_entry_ids[unique_id] # Get attributes from import file try: @@ -772,7 +772,7 @@ def 
import_data_dj(in_path, ignore_unknown_nodes=False, if not silent: print("STORING GROUP ELEMENTS...") import_groups = data['groups_uuid'] - for groupuuid, groupnodes in import_groups.iteritems(): + for groupuuid, groupnodes in import_groups.items(): # TODO: cache these to avoid too many queries group = models.DbGroup.objects.get(uuid=groupuuid) nodes_to_store = [dbnode_reverse_mappings[node_uuid] @@ -1027,13 +1027,13 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): # } import_unique_ids_mappings = {} # Export data since v0.3 contains the keys entity_name - for entity_name, import_data in data['export_data'].iteritems(): + for entity_name, import_data in data['export_data'].items(): # Again I need the entity_name since that's what's being stored since 0.3 if entity_name in metadata['unique_identifiers']: # I have to reconvert the pk to integer import_unique_ids_mappings[entity_name] = { int(k): v[metadata['unique_identifiers'][entity_name]] - for k, v in import_data.iteritems()} + for k, v in import_data.items()} ############### # IMPORT DATA # ############### @@ -1080,11 +1080,11 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): foreign_ids_reverse_mappings[entity_name] = { k: v.pk for k, v in - relevant_db_entries.iteritems()} + relevant_db_entries.items()} dupl_counter = 0 imported_comp_names = set() - for k, v in data['export_data'][entity_name].iteritems(): + for k, v in data['export_data'][entity_name].items(): if entity_name == COMPUTER_ENTITY_NAME: # The following is done for compatibility # reasons in case the export file was generated @@ -1148,7 +1148,7 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): fields_info = metadata['all_fields_info'].get(entity_name, {}) unique_identifier = metadata['unique_identifiers'].get(entity_name, None) - for import_entry_id, entry_data in (existing_entries[entity_name].iteritems()): + for import_entry_id, entry_data in (existing_entries[entity_name].items()): unique_id = entry_data[unique_identifier] existing_entry_id = foreign_ids_reverse_mappings[entity_name][unique_id] # TODO COMPARE, AND COMPARE ATTRIBUTES @@ -1167,13 +1167,13 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): # This is needed later to associate the import entry with the new pk import_entry_ids = dict() - for import_entry_id, entry_data in (new_entries[entity_name].iteritems()): + for import_entry_id, entry_data in (new_entries[entity_name].items()): unique_id = entry_data[unique_identifier] import_data = dict(deserialize_field( k, v, fields_info=fields_info, import_unique_ids_mappings=import_unique_ids_mappings, foreign_ids_reverse_mappings=foreign_ids_reverse_mappings) - for k, v in entry_data.iteritems()) + for k, v in entry_data.items()) # We convert the Django fields to SQLA. 
Note that some of # the Django fields were converted to SQLA compatible @@ -1272,7 +1272,7 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): print("SETTING THE IMPORTED STATES FOR NEW NODES...") # I set for all nodes, even if I should set it only # for calculations - for unique_id, new_pk in just_saved.iteritems(): + for unique_id, new_pk in just_saved.items(): imported_states.append( DbCalcState(dbnode_id=new_pk, state=calc_states.IMPORTED)) @@ -1281,7 +1281,7 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): # Now I have the PKs, print the info # Moreover, set the foreing_ids_reverse_mappings - for unique_id, new_pk in just_saved.iteritems(): + for unique_id, new_pk in just_saved.items(): from uuid import UUID if isinstance(unique_id, UUID): unique_id = str(unique_id) @@ -1382,7 +1382,7 @@ def import_data_sqla(in_path, ignore_unknown_nodes=False, silent=False): if not silent: print("STORING GROUP ELEMENTS...") import_groups = data['groups_uuid'] - for groupuuid, groupnodes in import_groups.iteritems(): + for groupuuid, groupnodes in import_groups.items(): # # TODO: cache these to avoid too many queries qb_group = QueryBuilder().append( Group, filters={'uuid': {'==': groupuuid}}) @@ -1522,13 +1522,13 @@ def serialize_field(data, track_conversion=False): if track_conversion: ret_data = {} ret_conversion = {} - for k, v in data.iteritems(): + for k, v in data.items(): ret_data[k], ret_conversion[k] = serialize_field( data=v, track_conversion=track_conversion) else: ret_data = {k: serialize_field(data=v, track_conversion=track_conversion) - for k, v in data.iteritems()} + for k, v in data.items()} elif isinstance(data, (list, tuple)): if track_conversion: ret_data = [] @@ -1589,7 +1589,7 @@ def serialize_dict(datadict, remove_fields=[], rename_fields={}, conversions = {} - for k, v in datadict.iteritems(): + for k, v in datadict.items(): if k not in remove_fields: # rename_fields.get(k,k): use the replacement if found in rename_fields, # otherwise use 'k' as the default value. 
@@ -1671,12 +1671,12 @@ def fill_in_query(partial_query, originating_entity_str, current_entity_str, # prepare the recursion for the referenced entities foreign_fields = {k: v for k, v in all_fields_info[ - current_entity_str].iteritems() - # all_fields_info[model_name].iteritems() + current_entity_str].items() + # all_fields_info[model_name].items() if 'requires' in v} new_tag_suffixes = tag_suffixes + [current_entity_str] - for k, v in foreign_fields.iteritems(): + for k, v in foreign_fields.items(): ref_model_name = v['requires'] fill_in_query(partial_query, current_entity_str, ref_model_name, new_tag_suffixes) @@ -1999,14 +1999,14 @@ def export_tree(what, folder,allowed_licenses=None, forbidden_licenses=None, export_data = dict() entity_separator = '_' - for entity_name, partial_query in entries_to_add.iteritems(): + for entity_name, partial_query in entries_to_add.items(): foreign_fields = {k: v for k, v in - all_fields_info[entity_name].iteritems() - # all_fields_info[model_name].iteritems() + all_fields_info[entity_name].items() + # all_fields_info[model_name].items() if 'requires' in v} - for k, v in foreign_fields.iteritems(): + for k, v in foreign_fields.items(): ref_model_name = v['requires'] fill_in_query(partial_query, entity_name, ref_model_name, [entity_name], entity_separator) diff --git a/aiida/plugins/entry.py b/aiida/plugins/entry.py index 0640d7d180..09b156b62e 100644 --- a/aiida/plugins/entry.py +++ b/aiida/plugins/entry.py @@ -105,7 +105,7 @@ def install(self, **opts): ic = InstallCommand() opts, args = ic.parser.parse_args() args.append(self.pip_url) - for k, v in opts.__dict__.iteritems(): + for k, v in opts.__dict__.items(): setattr(opts, k, v) req_set = ic.run(opts, args) req_set.install(opts) @@ -145,7 +145,7 @@ def test_installed(self): from aiida.plugins.entry_point import get_entry_point_names if iversion == self.version or not new_style: - for cat, ep in self.entry_points.iteritems(): + for cat, ep in self.entry_points.items(): if not set(ep).issubset(set(get_entry_point_names('aiida.' 
+ cat))): installed = False return installed, new_style, iversion @@ -212,7 +212,7 @@ def format_info(self, **kwargs): table.append(['Package:', self.package_name]) table.append(['Description:', self.description]) table.append(['Plugins:', '']) - for category, eps in self.entry_points.iteritems(): + for category, eps in self.entry_points.items(): table.append(['', category.capitalize() + ':']) table.extend([['', ep] for ep in eps]) table.append(['', '']) diff --git a/aiida/plugins/loader.py b/aiida/plugins/loader.py index 8b824c33c9..dbe98113bc 100644 --- a/aiida/plugins/loader.py +++ b/aiida/plugins/loader.py @@ -57,7 +57,7 @@ def load_plugin(plugin_type, safe=False): if base_path.count('.') == 0: base_path = '{}.{}'.format(base_path, base_path) - for prefix, entry_point_type in type_string_to_entry_point_type_map.iteritems(): + for prefix, entry_point_type in type_string_to_entry_point_type_map.items(): if base_path.startswith(prefix): entry_point = strip_prefix(base_path, prefix) try: diff --git a/aiida/plugins/registry.py b/aiida/plugins/registry.py index d2a388ffe4..c8ed04638c 100644 --- a/aiida/plugins/registry.py +++ b/aiida/plugins/registry.py @@ -127,7 +127,7 @@ def update_info(registry=None, errorhandler=None): if not registry: registry = load_cached() - for plugin, data in registry.iteritems(): + for plugin, data in registry.items(): try: entry = Entry(**data) pickle_to_registry_cache_folder(entry, plugin) diff --git a/aiida/restapi/common/utils.py b/aiida/restapi/common/utils.py index 004a6483ac..3fe2d7f756 100644 --- a/aiida/restapi/common/utils.py +++ b/aiida/restapi/common/utils.py @@ -396,7 +396,7 @@ def make_rel_url(rel, page): if path_elems.pop(-1) == 'page' or path_elems.pop(-1) == 'page': links = [] - for (rel, page) in rel_pages.iteritems(): + for (rel, page) in rel_pages.items(): if page is not None: links.append(make_rel_url(rel, page)) headers['Link'] = ''.join(links) @@ -441,7 +441,7 @@ def build_response(self, status=200, headers=None, data=None): response.status_code = status if headers is not None: - for k, v in headers.iteritems(): + for k, v in headers.items(): response.headers[k] = v return response diff --git a/aiida/restapi/translator/base.py b/aiida/restapi/translator/base.py index 855bdfdcc2..cfe68299fe 100644 --- a/aiida/restapi/translator/base.py +++ b/aiida/restapi/translator/base.py @@ -224,10 +224,10 @@ def set_filters(self, filters={}): """ if isinstance(filters, dict): if len(filters) > 0: - for tag, tag_filters in filters.iteritems(): + for tag, tag_filters in filters.items(): if len(tag_filters) > 0 and isinstance(tag_filters, dict): self._query_help["filters"][tag] = {} - for filter_key, filter_value in tag_filters.iteritems(): + for filter_key, filter_value in tag_filters.items(): if filter_key == "pk": filter_key = pk_dbsynonym self._query_help["filters"][tag][filter_key] \ @@ -266,7 +266,7 @@ def set_projections(self, projections): """ if isinstance(projections, dict): if len(projections) > 0: - for project_key, project_list in projections.iteritems(): + for project_key, project_list in projections.items(): self._query_help["project"][project_key] = project_list else: raise InputValidationError("Pass data in dictionary format where " @@ -310,7 +310,7 @@ def def_order(columns): return order_dict ## Assign orderby field query_help - for tag, columns in orders.iteritems(): + for tag, columns in orders.items(): self._query_help['order_by'][tag] = def_order(columns) def set_query(self, filters=None, orders=None, projections=None, id=None): diff 
--git a/aiida/scheduler/datastructures.py b/aiida/scheduler/datastructures.py
index e4b468a404..324e4c45d7 100644
--- a/aiida/scheduler/datastructures.py
+++ b/aiida/scheduler/datastructures.py
@@ -578,7 +578,7 @@ def serialize(self):
         """
         import json
 
-        ser_data = {k: self.serialize_field(v, self._special_serializers.get(k, None)) for k, v in self.iteritems()}
+        ser_data = {k: self.serialize_field(v, self._special_serializers.get(k, None)) for k, v in self.items()}
 
         return json.dumps(ser_data)
 
@@ -592,5 +592,5 @@ def load_from_serialized(self, data):
 
         deser_data = json.loads(data)
 
-        for key, value in deser_data.iteritems():
+        for key, value in deser_data.items():
             self[key] = self.deserialize_field(value, self._special_serializers.get(key, None))
diff --git a/aiida/scheduler/plugins/direct.py b/aiida/scheduler/plugins/direct.py
index 79cee99a16..cde747bad5 100644
--- a/aiida/scheduler/plugins/direct.py
+++ b/aiida/scheduler/plugins/direct.py
@@ -164,7 +164,7 @@ def _get_submit_script_header(self, job_tmpl):
             lines.append("# ENVIRONMENT VARIABLES BEGIN ###")
             if not isinstance(job_tmpl.job_environment, dict):
                 raise ValueError("If you provide job_environment, it must be " "a dictionary")
-            for key, value in job_tmpl.job_environment.iteritems():
+            for key, value in job_tmpl.job_environment.items():
                 lines.append("export {}={}".format(key.strip(), escape_for_bash(value)))
             lines.append("# ENVIRONMENT VARIABLES END ###")
             lines.append(empty_line)
diff --git a/aiida/scheduler/plugins/lsf.py b/aiida/scheduler/plugins/lsf.py
index 4c57735019..3741907d1d 100644
--- a/aiida/scheduler/plugins/lsf.py
+++ b/aiida/scheduler/plugins/lsf.py
@@ -423,7 +423,7 @@ def _get_submit_script_header(self, job_tmpl):
             lines.append("# ENVIRONMENT VARIABLES BEGIN ###")
             if not isinstance(job_tmpl.job_environment, dict):
                 raise ValueError("If you provide job_environment, it must be " "a dictionary")
-            for key, value in job_tmpl.job_environment.iteritems():
+            for key, value in job_tmpl.job_environment.items():
                 lines.append("export {}={}".format(key.strip(), escape_for_bash(value)))
             lines.append("# ENVIRONMENT VARIABLES END ###")
             lines.append(empty_line)
diff --git a/aiida/scheduler/plugins/pbsbaseclasses.py b/aiida/scheduler/plugins/pbsbaseclasses.py
index d5a9cf9580..344c0f0e18 100644
--- a/aiida/scheduler/plugins/pbsbaseclasses.py
+++ b/aiida/scheduler/plugins/pbsbaseclasses.py
@@ -313,7 +313,7 @@ def _get_submit_script_header(self, job_tmpl):
             lines.append("# ENVIRONMENT VARIABLES BEGIN ###")
             if not isinstance(job_tmpl.job_environment, dict):
                 raise ValueError("If you provide job_environment, it must be " "a dictionary")
-            for key, value in job_tmpl.job_environment.iteritems():
+            for key, value in job_tmpl.job_environment.items():
                 lines.append("export {}={}".format(key.strip(), escape_for_bash(value)))
             lines.append("# ENVIRONMENT VARIABLES END ###")
             lines.append(empty_line)
diff --git a/aiida/scheduler/plugins/sge.py b/aiida/scheduler/plugins/sge.py
index 4dadd21e53..6b36305519 100644
--- a/aiida/scheduler/plugins/sge.py
+++ b/aiida/scheduler/plugins/sge.py
@@ -271,7 +271,7 @@ def _get_submit_script_header(self, job_tmpl):
             lines.append("# ENVIRONMENT VARIABLES BEGIN ###")
             if not isinstance(job_tmpl.job_environment, dict):
                 raise ValueError("If you provide job_environment, it must be " "a dictionary")
-            for key, value in job_tmpl.job_environment.iteritems():
+            for key, value in job_tmpl.job_environment.items():
                 lines.append("export {}={}".format(key.strip(), escape_for_bash(value)))
             lines.append("# ENVIRONMENT VARIABLES END ###")
             lines.append(empty_line)
diff --git a/aiida/scheduler/plugins/slurm.py b/aiida/scheduler/plugins/slurm.py
index 350e976d36..8ffd178f59 100644
--- a/aiida/scheduler/plugins/slurm.py
+++ b/aiida/scheduler/plugins/slurm.py
@@ -382,7 +382,7 @@ def _get_submit_script_header(self, job_tmpl):
             lines.append("# ENVIRONMENT VARIABLES BEGIN ###")
             if not isinstance(job_tmpl.job_environment, dict):
                 raise ValueError("If you provide job_environment, it must be " "a dictionary")
-            for key, value in job_tmpl.job_environment.iteritems():
+            for key, value in job_tmpl.job_environment.items():
                 lines.append("export {}={}".format(key.strip(), escape_for_bash(value)))
             lines.append("# ENVIRONMENT VARIABLES END ###")
             lines.append(empty_line)
diff --git a/aiida/tools/dbexporters/tcod.py b/aiida/tools/dbexporters/tcod.py
index b6f82fbf7c..fbd185f932 100644
--- a/aiida/tools/dbexporters/tcod.py
+++ b/aiida/tools/dbexporters/tcod.py
@@ -416,7 +416,7 @@ def _inline_to_standalone_script(calc):
 
 for key, value in {}(
     {}
-).iteritems():
+).items():
     value.store()
 """.format(code_string, function_name, args_string)
 
@@ -912,7 +912,7 @@ def add_metadata_inline(what, node, parameters, args):
     loops.update(tcod_loops)
 
     for datablock in datablocks:
-        for k,v in dict(tags.items() + additional_tags.items()).iteritems():
+        for k,v in dict(tags.items() + additional_tags.items()).items():
             if not k.startswith('_'):
                 raise ValueError("Tag '{}' does not seem to start with "
                                  "an underscode ('_'): all CIF tags must "
@@ -1273,7 +1273,7 @@ def translate_calculation_specific_values(calc, translator, **kwargs):
         # '_tcod_atom_site_resid_force_Cartn_z': 'get_atom_site_residual_force_Cartesian_z',
     }
     tags = dict()
-    for tag, function in translation_map.iteritems():
+    for tag, function in translation_map.items():
         value = None
         try:
             value = getattr(translator, function)(calc, **kwargs)
diff --git a/aiida/tools/dbimporters/baseclasses.py b/aiida/tools/dbimporters/baseclasses.py
index 1ae0e7d1da..0cf06d2320 100644
--- a/aiida/tools/dbimporters/baseclasses.py
+++ b/aiida/tools/dbimporters/baseclasses.py
@@ -215,7 +215,7 @@ def __repr__(self):
                                ",".join(["{}={}".format(k,
                                                         '"{}"'.format(v)
                                                         if issubclass(v.__class__, basestring) else v)
-                                         for k, v in self.source.iteritems()]))
+                                         for k, v in self.source.items()]))
 
     @property
     def contents(self):
diff --git a/aiida/tools/dbimporters/plugins/icsd.py b/aiida/tools/dbimporters/plugins/icsd.py
index 52b9f04e23..e108b1440d 100644
--- a/aiida/tools/dbimporters/plugins/icsd.py
+++ b/aiida/tools/dbimporters/plugins/icsd.py
@@ -380,7 +380,7 @@ def _query_sql_db(self, **kwargs):
 
         sql_where_query = []  # second part of sql query
 
-        for k, v in kwargs.iteritems():
+        for k, v in kwargs.items():
             if not isinstance(v, list):
                 v = [v]
             sql_where_query.append("({})".format(self.keywords_db[k][1](self,
@@ -418,7 +418,7 @@ def _queryweb(self, **kwargs):
             "mineral": ""
         }
 
-        for k, v in kwargs.iteritems():
+        for k, v in kwargs.items():
             try:
                 realname = self.keywords[k][0]
                 newv = self.keywords[k][1](k, v)
diff --git a/aiida/transport/plugins/test_all_plugins.py b/aiida/transport/plugins/test_all_plugins.py
index c3793a001c..fd2b3daa33 100644
--- a/aiida/transport/plugins/test_all_plugins.py
+++ b/aiida/transport/plugins/test_all_plugins.py
@@ -85,7 +85,7 @@ def test_all_plugins(self):
         The wrapper function that calls the subfunction for each transport.
         """
         exceptions = []
-        for tr_name, custom_transport in all_custom_transports.iteritems():
+        for tr_name, custom_transport in all_custom_transports.items():
             try:
                 actual_test_method(self, custom_transport)
             except Exception as e:
diff --git a/aiida/utils/serialize.py b/aiida/utils/serialize.py
index 0d0e1eb186..b44b0f6992 100644
--- a/aiida/utils/serialize.py
+++ b/aiida/utils/serialize.py
@@ -69,11 +69,11 @@ def serialize_data(data):
     elif isinstance(data, uuid.UUID):
         return '{}{}'.format(_PREFIX_VALUE_UUID, data)
     elif isinstance(data, AttributeDict):
-        return AttributeDict({encode_key(key): serialize_data(value) for key, value in data.iteritems()})
+        return AttributeDict({encode_key(key): serialize_data(value) for key, value in data.items()})
     elif isinstance(data, AttributesFrozendict):
-        return AttributesFrozendict({encode_key(key): serialize_data(value) for key, value in data.iteritems()})
+        return AttributesFrozendict({encode_key(key): serialize_data(value) for key, value in data.items()})
     elif isinstance(data, collections.Mapping):
-        return {encode_key(key): serialize_data(value) for key, value in data.iteritems()}
+        return {encode_key(key): serialize_data(value) for key, value in data.items()}
     elif isinstance(data, collections.Sequence) and not isinstance(data, (str, unicode)):
         return [serialize_data(value) for value in data]
     else:
@@ -90,11 +90,11 @@ def deserialize_data(data):
     :return: the deserialized data with keys decoded and node instances loaded from UUID's
     """
     if isinstance(data, AttributeDict):
-        return AttributeDict({decode_key(key): deserialize_data(value) for key, value in data.iteritems()})
+        return AttributeDict({decode_key(key): deserialize_data(value) for key, value in data.items()})
     elif isinstance(data, AttributesFrozendict):
-        return AttributesFrozendict({decode_key(key): deserialize_data(value) for key, value in data.iteritems()})
+        return AttributesFrozendict({decode_key(key): deserialize_data(value) for key, value in data.items()})
     elif isinstance(data, collections.Mapping):
-        return {decode_key(key): deserialize_data(value) for key, value in data.iteritems()}
+        return {decode_key(key): deserialize_data(value) for key, value in data.items()}
     elif isinstance(data, collections.Sequence) and not isinstance(data, (str, unicode)):
         return [deserialize_data(value) for value in data]
     elif isinstance(data, (str, unicode)) and data.startswith(_PREFIX_VALUE_NODE):
diff --git a/aiida/work/job_processes.py b/aiida/work/job_processes.py
index dfd5bb395f..15fe2f25eb 100644
--- a/aiida/work/job_processes.py
+++ b/aiida/work/job_processes.py
@@ -448,7 +448,7 @@ def define(cls_, spec):
                 help='Set a string for the output parser. Can be None if no output plugin is available or needed')
 
             # Define the actual inputs based on the use methods of the calculation class
-            for key, use_method in calc_class._use_methods.iteritems():
+            for key, use_method in calc_class._use_methods.items():
 
                 valid_type = use_method['valid_types']
                 docstring = use_method.get('docstring', None)
@@ -535,7 +535,7 @@ def _setup_db_inputs(self):
             if isinstance(port, PortNamespace):
                 additional = self._calc_class._use_methods[name]['additional_parameter']
 
-                for k, v in input_value.iteritems():
+                for k, v in input_value.items():
                     try:
                         getattr(self.calc, 'use_{}'.format(name))(v, **{additional: k})
                     except AttributeError:
@@ -615,7 +615,7 @@ def retrieved(self, retrieved_temporary_folder=None):
             raise
 
         # Finally link up the outputs and we're done
-        for label, node in self.calc.get_outputs_dict().iteritems():
+        for label, node in self.calc.get_outputs_dict().items():
             self.out(label, node)
 
         return exit_code
diff --git a/aiida/work/processes.py b/aiida/work/processes.py
index 2c60eb3e90..4358e35e1d 100644
--- a/aiida/work/processes.py
+++ b/aiida/work/processes.py
@@ -145,7 +145,7 @@ def save_instance_state(self, out_state, save_context):
             out_state[self.SaveKeys.CALC_ID.value] = self.pid
 
     def get_provenance_inputs_iterator(self):
-        return filter(lambda kv: not kv[0].startswith('_'), self.inputs.iteritems())
+        return filter(lambda kv: not kv[0].startswith('_'), self.inputs.items())
 
     @override
     def load_instance_state(self, saved_state, load_context):
@@ -458,7 +458,7 @@ def _setup_db_inputs(self):
         """
 
        parent_calc = self.get_parent_calc()
 
-        for name, input_value in self._flat_inputs().iteritems():
+        for name, input_value in self._flat_inputs().items():
             if isinstance(input_value, Calculation):
                 input_value = utils.get_or_create_output_group(input_value)
@@ -565,7 +565,7 @@ def exposed_inputs(self, process_class, namespace=None, agglomerate=True):
             # Get the list of ports that were exposed for the given Process class in the current namespace
             exposed_inputs_list = self.spec()._exposed_inputs[namespace][process_class]
 
-            for name, port in port_namespace.ports.iteritems():
+            for name, port in port_namespace.ports.items():
                 if name in inputs and name in exposed_inputs_list:
                     exposed_inputs[name] = inputs[name]
 
@@ -779,7 +779,7 @@ def _run(self):
             if isinstance(result, Data):
                 self.out(self.SINGLE_RETURN_LINKNAME, result)
             elif isinstance(result, collections.Mapping):
-                for name, value in result.iteritems():
+                for name, value in result.items():
                     self.out(name, value)
             else:
                 raise TypeError(
diff --git a/aiida/work/workchain.py b/aiida/work/workchain.py
index 19acc7b3cd..0af77c5e1f 100644
--- a/aiida/work/workchain.py
+++ b/aiida/work/workchain.py
@@ -129,7 +129,7 @@ def to_context(self, **kwargs):
         a user to add multiple intersteps that will assign a certain value
         to the corresponding key in the context of the workchain
         """
-        for key, value in kwargs.iteritems():
+        for key, value in kwargs.items():
             awaitable = construct_awaitable(value)
             awaitable.key = key
             self.insert_awaitable(awaitable)
diff --git a/examples/work/common.py b/examples/work/common.py
index 8255031699..4fb0d7f7f7 100644
--- a/examples/work/common.py
+++ b/examples/work/common.py
@@ -59,7 +59,7 @@ def get_pseudos(structure, family_name):
 
     # Will contain a list of all species of the pseudo with given PK
     pseudo_species = defaultdict(list)
 
-    for kindname, pseudo in kind_pseudo_dict.iteritems():
+    for kindname, pseudo in kind_pseudo_dict.items():
         pseudo_dict[pseudo.pk] = pseudo
         pseudo_species[pseudo.pk].append(kindname)
diff --git a/examples/work/scratch.py b/examples/work/scratch.py
index 3ff1fb852d..373bc33cdb 100644
--- a/examples/work/scratch.py
+++ b/examples/work/scratch.py
@@ -101,5 +101,5 @@ def launch_calculations(self):
             self.insert_barrier(Calc(pid))
 
     def finalise(self):
-        for s, pid in self.ctx.launched.iteritems():
+        for s, pid in self.ctx.launched.items():
             self.out(s, load_node(pid)['output_parameters'].dict)
\ No newline at end of file
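
Note for reviewers: every hunk in this patch only reads the dictionary inside the loop, so swapping iteritems() for items() preserves behaviour on Python 2 (where items() eagerly builds a list instead of returning a lazy iterator, at a small memory cost for large dicts) and remains valid on Python 3 (where items() returns a live view). A minimal sketch of the one caveat to watch in future conversions follows; the dictionary d and both loop bodies are illustrative placeholders, not code from this changeset.

    # Hypothetical example, not AiiDA code.
    d = {'alpha': 1, 'beta': 2}

    # Read-only iteration: identical results before and after the swap,
    # on both Python 2 and Python 3.
    for key, value in d.items():
        print('{} -> {}'.format(key, value))

    # Caveat: on Python 3, d.items() is a live view, and resizing the
    # dict while iterating over it raises RuntimeError. A loop that
    # mutates the dict needs an explicit snapshot instead:
    for key in list(d):
        if d[key] > 1:
            del d[key]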