Update CondDBFW to Python3 #35268

Merged: 1 commit, Sep 24, 2021
CondCore/Utilities/python/CondDBFW/command_line.py (6 changes: 2 additions & 4 deletions)
@@ -5,8 +5,6 @@
 Works by taking the main keyword (first command given to the script),
 passing that to the function that will deal with that action, along with the following arguments as parameters for that function.
 """
-from __future__ import print_function
-from __future__ import absolute_import
 
 from . import querying
 import argparse
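
Both __future__ imports are deleted because the behaviors they enable are the Python 3 defaults: print is always a function, and bare imports are always absolute (which is why the explicit "from . import querying" form is used for the relative import). A trivial check, illustrative only and not part of the PR:

print("hello")   # a function call; the Python 2 statement form  print "hello"  is a SyntaxError in Python 3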
@@ -112,7 +110,7 @@ def copy_tag(arguments):
     # set end_of_validity to -1 because sqlite doesn't support long ints
     source_tag.end_of_validity = -1
     source_tag.name = arguments.dest_tag
-    source_tag.modification_time = datetime.datetime.now()
+    source_tag.modification_time = datetime.datetime.utcnow()
 
     # create new iovs
     new_iovs = []
@@ -154,7 +152,7 @@ def copy_global_tag(arguments):
     tags = source_connection.tag(name=tags)
 
     # copy global tag first
-    global_tag.insertion_time = datetime.datetime.now()
+    global_tag.insertion_time = datetime.datetime.utcnow()
     global_tag.validity = -1
     dest_connection.write_and_commit(global_tag)
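
Both timestamp sites switch from datetime.datetime.now() to datetime.datetime.utcnow(), presumably so the stored modification and insertion times are UTC regardless of the timezone of the machine running the copy. A minimal sketch of the difference, illustrative only and not part of the PR:

import datetime

local = datetime.datetime.now()    # naive local time; depends on the host timezone
utc = datetime.datetime.utcnow()   # naive UTC; identical across hosts
print(local - utc)                 # roughly the host's UTC offset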
CondCore/Utilities/python/CondDBFW/data_formats.py (17 changes: 8 additions & 9 deletions)
@@ -6,7 +6,6 @@
 Note: may also contain a decorator that can wrap a class around a function that contains a script (future development).
 
 """
-from __future__ import absolute_import
 
 from .data_sources import json_data_node, json_list, json_dict, json_basic
 
@@ -30,7 +29,7 @@ def to_datatables(script):
     def new_script(self, connection):
         try:
             data = script(self, connection)
-            if(isinstance(data, list)):
+            if(type(data) == list):
                 data = _json_data_node.make(data)
             return to_datatables(data)
         except (KeyError, TypeError) as e:
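
Note that the isinstance(data, list) to type(data) == list rewrite is not a pure spelling change: type(...) == is an exact-type test, so instances of list subclasses no longer match. A small illustration (hypothetical subclass, not from the PR):

class MyList(list):
    pass

data = MyList()
isinstance(data, list)   # True: accepts subclasses
type(data) == list       # False: exact type only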
@@ -64,19 +63,19 @@ def _to_array_of_dicts(data):
     headers = data.get("headers").data()
     data_list = data.get("data").data()
     def unicode_to_str(string):
-        return str(string) if isinstance(string, unicode) else string
-    headers = map(unicode_to_str, headers)
+        return str(string) if type(string) == str else string
+    headers = list(map(unicode_to_str, headers))
     def row_to_dict(row):
-        row = map(unicode_to_str, row)
-        return dict(zip(headers, row))
-    array_of_dicts = map(row_to_dict, data_list)
+        row = list(map(unicode_to_str, row))
+        return dict(list(zip(headers, row)))
+    array_of_dicts = list(map(row_to_dict, data_list))
     return json_data_node.make(array_of_dicts)

 def _to_datatables(data):
-    headers = map(str, data.get(0).data().keys())
+    headers = list(map(str, list(data.get(0).data().keys())))
     new_data = []
     for n in range(0, len(data.data())):
-        new_data.append(map(lambda entry : str(entry) if isinstance(entry, unicode) else entry, data.get(n).data().values()))
+        new_data.append([str(entry) if type(entry) == str else entry for entry in list(data.get(n).data().values())])
     return json_data_node.make({
         "headers" : headers,
         "data" : new_data
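
The recurring list(map(...)), list(zip(...)) and list(...keys()) wrappers are the core Python 3 fix in this file: map, zip and dict.keys now return lazy iterators or views rather than lists, so code that indexes the result or serializes it to JSON breaks without an explicit conversion. A quick sketch:

headers = map(str, ["a", "b"])     # Python 3: a one-shot map object, not a list
# headers[0]                       # would raise TypeError: 'map' object is not subscriptable
headers = list(map(str, ["a", "b"]))
headers[0]                         # 'a'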
CondCore/Utilities/python/CondDBFW/data_sources.py (28 changes: 13 additions & 15 deletions)
@@ -3,8 +3,6 @@
 This file contains the base DataSource class, and all sub classes that implement their own methods for parsing data.
 
 """
-from __future__ import print_function
-from __future__ import absolute_import
 
 import json
 
@@ -107,7 +105,7 @@ def __init__(self, sqlite_file_name):
                 sql_query = "select %s from %s" % (column_string, table)
                 results = cursor.execute(sql_query).fetchall()
                 for n in range(0, len(results)):
-                    results[n] = dict(zip(table_to_columns[table], map(str, results[n])))
+                    results[n] = dict(list(zip(table_to_columns[table], list(map(str, results[n])))))
                 table_to_data[str(table)] = results
             self._data = json_data_node.make(table_to_data)
         else:
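
Strictly, only iteration is needed at this call site: dict() accepts any iterable of key/value pairs, so dict(zip(...)) already works in Python 3 and the added list(...) wrappers are redundant, though harmless. A one-line sketch:

dict(zip(["a", "b"], map(str, (1, 2))))   # {'a': '1', 'b': '2'} with no list() needed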
@@ -129,9 +127,9 @@ def __init__(self, data=None):
     # be created in code that shouldn't be doing it.
     @staticmethod
     def make(data):
-        if isinstance(data, list):
+        if type(data) == list:
             return json_list(data)
-        elif isinstance(data, dict):
+        elif type(data) == dict:
             return json_dict(data)
         else:
             return json_basic(data)
@@ -159,12 +157,12 @@ def find(self, type_name):
         # traverse json_data_node structure, and find all lists
         # if this node in the structure is a list, return all sub lists
         lists = []
-        if isinstance(self._data, type_name):
+        if type(self._data) == type_name:
             lists.append(self._data)
-        if isinstance(self._data, list):
+        if type(self._data) == list:
             for item in self._data:
                 lists += json_data_node.make(item).find(type_name)
-        elif isinstance(self._data, dict):
+        elif type(self._data) == dict:
             for key in self._data:
                 lists += json_data_node.make(self._data[key]).find(type_name)
         return lists
@@ -198,7 +196,7 @@ def add_child(self, data):
     def __iter__(self):
         return self
 
-    def next(self):
+    def __next__(self):
         if self.iterator_index > len(self._data)-1:
             self.reset()
             raise StopIteration
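
The next to __next__ rename is what keeps the class usable as an iterator: Python 3's for loop and the next() builtin call obj.__next__(), whereas Python 2 called obj.next(). A minimal iterator in the same shape (hypothetical class, for illustration):

class Counter:
    def __init__(self, limit):
        self.limit = limit
        self.i = 0
    def __iter__(self):
        return self
    def __next__(self):          # was  def next(self)  in Python 2
        if self.i >= self.limit:
            raise StopIteration
        self.i += 1
        return self.i

print(list(Counter(3)))          # [1, 2, 3]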
@@ -227,13 +225,13 @@ def indices(self, *indices):
 
     def get_members(self, member_name):
         # assume self.data() is a list
-        if not(type(member_name) in [str, unicode]):
+        if not(type(member_name) in [str, str]):
             raise TypeError("Value given for member name must be a string.")
         type_of_first_item = self.data()[0].__class__
         for item in self.data():
             if item.__class__ != type_of_first_item:
                 return None
-        return json_data_node.make(map(lambda item : getattr(item, member_name), self.data()))
+        return json_data_node.make([getattr(item, member_name) for item in self.data()])
 
     # format methods
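A side effect of the mechanical unicode to str substitution is visible above: [str, unicode] becomes the redundant [str, str]. The check still works, but the idiomatic Python 3 spelling would be a single isinstance test (a sketch of the equivalent, not what the PR does):

if not isinstance(member_name, str):
    raise TypeError("Value given for member name must be a string.")
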
@@ -245,7 +243,7 @@ def as_dicts(self, convert_timestamps=False):
 
         if self.get(0).data().__class__.__name__ in ["GlobalTag", "GlobalTagMap", "Tag", "IOV", "Payload"]:
             # copy data
-            new_data = map(lambda item : item.as_dicts(convert_timestamps=convert_timestamps), [item for item in self.data()])
+            new_data = [item.as_dicts(convert_timestamps=convert_timestamps) for item in [item for item in self.data()]]
             return new_data
         else:
             print("Data in json_list was not the correct type.")
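
The converted comprehension keeps the original's redundant inner copy: iterating [item for item in self.data()] is the same as iterating self.data() directly, so an equivalent simplification would be (a sketch, not what the PR does):

new_data = [item.as_dicts(convert_timestamps=convert_timestamps) for item in self.data()]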
@@ -281,7 +279,7 @@ def as_table(self, fit=["all"], columns=None, hide=None, col_width=None, row_num
         table_name = None
         data = self.data()
         # gets headers stored in first dictionary
-        headers = data[0].keys()
+        headers = list(data[0].keys())
 
         if columns != None:
             headers = columns
@@ -298,7 +296,7 @@
 
         if col_width == None:
             import subprocess
-            table_width = int(0.95*int(subprocess.check_output(["stty", "size"]).split(" ")[1]))
+            table_width = int(0.95*int(subprocess.check_output([b'stty', b'size']).split(b' ')[1]))
            col_width = int(table_width/len(headers))
 
         if hide != None:
if hide != None:
Expand Down Expand Up @@ -335,7 +333,7 @@ def cell(content, header, col_width, fit):
for column in fit:

if not(column in headers):
print("'%s' is not a valid column." % column)
print(("'%s' is not a valid column." % column))
return

column_to_width[column] = max_width_of_column(column, data)
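
The doubled parentheses in the new print call are a harmless artifact of the automated 2to3-style conversion, which wraps the old print statement's argument in a call even when it was already parenthesized. The inner parentheses are plain grouping, not a tuple, so the two spellings below are identical:

print(("'%s' is not a valid column." % "foo"))   # conversion output
print("'%s' is not a valid column." % "foo")     # equivalent hand-written form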