Skip to content
Permalink
Branch: master
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
9646 lines (7458 sloc) 263 KB
#
# Collective Knowledge (CK)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin
#
# CK kernel - we made it monolithic with a minimal set
# of common functions for performance reasons
__version__ = "1.9.7.1" # We use 3 digits for the main (released) version and 4th digit for development revision
# Do not use characters (to detect outdated version)!
# Extra modules global for the whole kernel
import sys
import json
import os
import imp # Loading Python modules
# Module-level kernel state flags (mutated at runtime during initialization)
initialized=False # True if initialized
allow_print=True # Needed to suppress all output
con_encoding='' # Use non-default console encoding
# Global CK kernel configuration: default names, file/directory conventions,
# access-control policies and the table of kernel-level actions.
# Read throughout the kernel via cfg[...] / cfg.get(...).
cfg={
      "name":"Collective Knowledge",
      "desc":"exposing ad-hoc experimental setups to extensible repository and big data predictive analytics",
      "cmd":"ck <action> $#module_uoa#$ (cid1/uid1) (cid2/uid2) (cid3/uid3) key_i=value_i ... @file.json",

      # Web links (docs, wiki, API, version status)
      "wiki_data_web":"https://github.com/ctuning/ck/wiki/ckb_", # Collective Knowledge Base (ckb)
      "private_wiki_data_web":"https://github.com/ctuning/ck/wiki/ckb_", # Collective Knowledge Base (ckb)
      "api_web":"http://cknowledge.org/soft/docs/",
      "status_url":"https://raw.githubusercontent.com/ctuning/ck/master/setup.py",

      "help_examples":" Example of obtaining, compiling and running a shared benchmark on Linux with GCC:\n $ ck pull repo:ctuning-programs\n $ ck compile program:cbench-automotive-susan --speed\n $ ck run program:cbench-automotive-susan\n\n Example of an interactive CK-powered article:\n http://cknowledge.org/repo\n",
      "help_web":" Documentation and Getting Started Guide:\n https://github.com/ctuning/ck/wiki",

      "ck_web":"https://github.com/ctuning/ck",
      "ck_web_wiki":"https://github.com/ctuning/ck/wiki",

      "default_shared_repo_url":"https://github.com/ctuning",
      "github_repo_url":"https://github.com",

#      "default_license":"See CK LICENSE.txt for licensing details",
#      "default_copyright":"See CK COPYRIGHT.txt for copyright details",
#      "default_developer":"cTuning foundation",
#      "default_developer_email":"admin@cTuning.org",
#      "default_developer_webpage":"http://cTuning.org",

      # Special characters used to detect the current CID on the command line
      "detect_cur_cid":"#",
      "detect_cur_cid1":"^",

      "error":"CK error: ",
      "json_sep":"*** ### --- CK JSON SEPARATOR --- ### ***",

      # Default module/repo naming conventions
      "default_module":"data",
      "module_name":"module",
      "module_uids":["032630d041b4fd8a"],
      "repo_name":"repo",
      "module_code_name":"module",
      "module_full_code_name":"module.py",

      # Environment variables honored by the kernel
      "env_key_root":"CK_ROOT",
      "env_key_local_repo":"CK_LOCAL_REPO",
      "env_key_local_kernel_uoa":"CK_LOCAL_KERNEL_UOA",
      "env_key_repos":"CK_REPOS",

      "subdir_default_repos":"repos",

      "user_home_dir_ext":"CK", # if no path to repos is defined, use user home dir with this extension

      "kernel_dir":"ck",
      "kernel_dirs":["ck",""],

      "file_kernel_py":"ck/kernel.py",

      "subdir_default_repo":"repo",
      "subdir_kernel":"kernel",
      "subdir_kernel_default":"default",
      "subdir_ck_ext":".cm", # keep compatibility with Collective Mind V1.x

      "file_for_lock":"ck_lock.txt",

      "special_directories":[".cm", ".svn", ".git"], # special directories that should be ignored when copying/moving entries

      "ignore_directories_when_archive_repo":[".svn", ".git"],

      # Well-known file names inside CK entries
      "file_meta_old":"data.json", # keep compatibility with Collective Mind V1.x
      "file_meta":"meta.json",
      "file_info":"info.json",
      "file_desc":"desc.json",
      "file_updates":"updates.json",

      "file_alias_a": "alias-a-",
      "file_alias_u": "alias-u-",

      "linux_sudo":"sudo",
      "install_ck_as_lib":"python setup.py install",

      "repo_file":".ckr.json",

      "file_cache_repo_uoa":".ck.cache_repo_uoa.json",
      "file_cache_repo_info":".ck.cache_repo_info.json",

      "default_host":"localhost",
      "default_port":"3344",

      # Per-platform shell/console conventions
      "detached_console":{"win":{"cmd":"start $#cmd#$", "use_create_new_console_flag":"yes"},
                          "linux":{"cmd":"xterm -hold -e \"$#cmd#$\""}},

      "batch_extension":{"win":".bat",
                         "linux":".sh"},

      "default_archive_name":"ck-archive.zip",

      # TODO: remove "http://"?
      "index_host":"http://localhost",
      "index_port":"9200",
      "index_use_curl":"no",

      "wfe_template":"default",

      "module_repo_name":"repo",
      "repo_name_default":"default",
      "repo_uid_default":"604419a9fcc7a081",
      "repo_name_local":"local",
      "repo_uid_local":"9a3280b14a4285c9",

      "default_exchange_repo_uoa":"remote-ck",
      "default_exchange_subrepo_uoa":"upload",

      "external_editor":{"win":"wordpad $#filename#$",
                         "linux":"vim $#filename#$"},

      "shell":{"linux":{
                         "redirect_stdout":">",
                         "env_separator": ";"
                       },
               "win":  {
                         "redirect_stdout":">",
                         "env_separator": "&&"
                       }
      },

      # Access-control policy flags (checked by check_writing)
      "forbid_global_delete": "no",
      "forbid_global_writing": "no",
      "forbid_writing_modules": "no",
      "forbid_writing_to_default_repo": "no",
      "forbid_writing_to_local_repo": "no",
      "allow_writing_only_to_allowed": "no",

      "allow_run_only_from_allowed_repos": "no",
      "repo_uids_to_allow_run":["604419a9fcc7a081",
                                "9a3280b14a4285c9",
                                "76c4424a1473c873",
                                "a4328ba99679e0d1",
                                "7fd7e76e13f4cd6a",
                                "215d441c19db1fed",
                                "43eaa6c2d1892c32"],

      "use_indexing": "no",

      # Keys stripped from an action's input before passing it downstream
      "internal_keys": [
                         "action",
                         "repo_uoa",
                         "module_uoa",
                         "data_uoa",
                         "cid",
                         "cids",
                         "cid1",
                         "cid2",
                         "cid3",
                         "xcids",
                         "unparsed_cmd",
                         "con_encoding",
                         "ck_profile",
                         "out",
                         "out_file"
                       ],

      # Command templates per repository back-end type
      "repo_types":{
                     "git":{
                             "clone":"git clone $#url#$ $#path#$",
                             "pull":"git pull",
                             "push":"git push",
                             "add":"git add $#files#$",
                             "rm":"git rm -rf $#files#$",
                             "commit":"git commit *",
                             "version":"git --version",
                             "checkout":"git checkout $#id#$"
                           }
                   },

      # Kernel-level actions exposed on the command line / web front-end
      "actions":{
                  "uid":{"desc":"generate UID", "for_web": "yes"},
                  "version":{"desc":"print CK version", "for_web": "yes"},
                  "python_version":{"desc":"print python version used by CK", "for_web": "no"},
                  "status":{"desc":"check CK version status", "for_web": "yes"},
                  "copy_path_to_clipboard":{"desc":"copy current path to clipboard", "for_web": "no"},

                  "wiki":{"desc":"<CID> open discussion wiki page for a given entry"}, # Collective Knowledge Base (ckb)
                  "pwiki":{"desc":"<CID> open private discussion wiki page for a given entry"},

                  "help":{"desc":"<CID> print help about data (module) entry"},
                  "short_help":{"desc":"<CID> print short help about CK"},
                  "webhelp":{"desc":"<CID> open browser with online help (description) for a data (module) entry"},
                  "webapi":{"desc":"<CID> open browser with online API for a given module, if exists"},
                  "guide":{"desc":"open CK wiki with user/developer guides"},
                  "info":{"desc":"<CID> print help about module"},

                  "browser":{"desc":"start CK web service and open browser"},

                  "add":{"desc":"<CID> add entry", "for_web":"yes"},
                  "update":{"desc":"<CID> update entry", "for_web":"yes"},
                  "load":{"desc":"<CID> load meta description of entry", "for_web": "yes"},
                  "edit":{"desc":"<CID> edit entry description using external editor", "for_web":"no"},

                  "zip":{"desc":"<CID> zip entries", "for_web":"no"},

                  "find":{"desc":"<CID> find path to entry"},
                  "cd":{"desc":"<CID> print 'cd {path to entry}'"},
                  "cdc":{"desc":"<CID> print 'cd {path to entry} and copy to clipboard, if supported"},
                  "path":{"desc":"<CID> detect CID in the current directory"},
                  "cid":{"desc":"<CID> get CID of the current entry"},

                  "rm":{"desc":"<CID> delete entry", "for_web":"yes"},
                  "remove":{"desc":"see 'rm'", "for_web":"yes"},
                  "delete":{"desc":"see 'rm'", "for_web":"yes"},

                  "ren":{"desc":"<CID> <new name) (data_uid) (remove_alias) rename entry", "for_web":"yes"},
                  "rename":{"desc":"see 'ren' function", "for_web":"yes"},

                  "cp":{"desc":"<CID> <CID1> copy entry", "for_web":"yes"},
                  "copy":{"desc":"see 'cp'", "for_web":"yes"},

                  "mv":{"desc":"<CID> <CID1> move entry", "for_web":"yes"},
                  "move":{"desc":"see 'mv'", "for_web":"yes"},

                  "list_files":{"desc":" list files recursively in a given entry", "for_web": "yes"},
                  "delete_file":{"desc":"<file> delete file from a given entry", "for_web":"yes"},

                  "list":{"desc":"<CID> list entries", "for_web": "yes"},
                  "ls":{"desc":"see 'list'", "for_web": "yes"},

                  "search":{"desc":"<CID> search entries", "for_web": "yes"},

                  "pull":{"desc":"<CID> (filename) or (empty to get the whole entry as archive) pull file from entry"},
                  "push":{"desc":"<CID> (filename) push file to entry"},

                  "add_action":{"desc":"add action (function) to existing module"},
                  "remove_action":{"desc":"remove action (function) from existing module"},
                  "list_actions":{"desc":"list actions (functions) in existing module", "for_web":"yes"},

                  "add_index":{"desc":"<CID> add index"},
                  "delete_index":{"desc":"<CID> remove index"},

                  "convert_cm_to_ck":{"desc":"<CID> convert old CM entries to CK entries"},

                  "create_entry":{"desc":"<directory> create an entry for a given directory name"},

                  "get_api":{"desc":"--func=<func> print API of a function in a given module"},

                  "print_input":{"desc":"prints input"},
      },

      "actions_redirect":{"list":"list_data",
                          "ls":"list_data"},

      # Actions available for every module (handled by the kernel itself)
      "common_actions":["webhelp", "webapi", "help", "info", "print_input",
                        "wiki",
                        "path", "find", "cid", "cd", "cdc",
                        "browser",
                        "add",
                        "edit",
                        "load",
                        "zip",
                        "rm", "remove", "delete",
                        "update",
                        "ren", "rename",
                        "cp", "copy",
                        "mv", "move",
                        "ls",
                        "list",
                        "search",
                        "pull",
                        "push",
                        "list_files",
                        "delete_file",
                        "add_action",
                        "remove_action",
                        "list_actions",
                        "create_entry",
                        "add_index",
                        "delete_index",
                        "get_api",
                        "convert_cm_to_ck"]
}
# Mutable runtime state filled in during kernel initialization:
# resolved repository/kernel paths plus per-path module caches.
work={
      "env_root":"",               # Path to CK installation

      "dir_default_repo":"",
      "dir_default_repo_path":"",
      "dir_default_kernel":"",
      "dir_default_cfg":"",

      "dir_local_repo":"",
      "dir_local_repo_path":"",
      "dir_local_kernel":"",
      "dir_local_cfg":"",

      "local_kernel_uoa":"",

      "dir_work_repo":"",
      "dir_work_repo_path":"",
      "dir_work_cfg":"",

      "dir_repos":"",

      "dir_cache_repo_uoa":"",
      "dir_cache_repo_info":"",

      "repo_name_work":"",
      "repo_uid_work":"",

      'cached_module_by_path':{},
      'cached_module_by_path_last_modification':{}
}

# Repository caches (populated lazily at runtime)
paths_repos=[]        # First path to local repo (if exist), than global
cache_repo_init=False # True, if initialized
paths_repos_all=[]    # Path to all repos
cache_repo_uoa={}     # Disambiguate repo UOA to repo UID
cache_repo_info={}    # Cache repo info with path and type

# Python 2/3 compatibility shims (set up elsewhere in the kernel)
type_long=None        # In Python 3 -> int, in Python 2 -> long
string_io=None        # StringIO, which is imported differently in Python 2 and 3
##############################################################################
# Universal print of unicode string in utf8 that supports Python 2.x and 3.x
#
# TARGET: end users
def out(s):
    """
    Universal print of a unicode string in utf8, supporting both
    Python 2.x and 3.x. Honors the global 'allow_print' flag (suppress
    all output) and the 'con_encoding' console-encoding override.

    Input:  s - unicode string to print
    Output: None
    """
    if allow_print:
        # NOTE(review): console encoding is taken from sys.stdin (not stdout);
        # presumably both match in an interactive console - confirm.
        if con_encoding=='':
            x=sys.stdin.encoding
            if x==None:
                b=s.encode()
            else:
                b=s.encode(x, 'ignore')
        else:
            b=s.encode(con_encoding, 'ignore')

        if sys.version_info[0]>2:
            try: # We encountered issues on ipython with Anaconda
                 # and hence made this work around
                sys.stdout.buffer.write(b)
                sys.stdout.buffer.write(b'\n')
            except Exception as e:
                # Fall back to plain print if the raw buffer is unavailable
                print(s)
                pass
        else:
            print(b)

        sys.stdout.flush()

    return None
##############################################################################
# Universal debug print of a dictionary (removing unprintable parts)
#
# TARGET: end users
def debug_out(i):
    """
    Universal debug print of a dictionary: silently drops top-level keys
    whose values cannot be serialized to JSON and pretty-prints the rest.

    Input:  i - dictionary
    Output: {'return': 0}
    """
    import json

    printable={}

    # Keep only keys whose values survive JSON serialization
    for k in i:
        try:
            json.dumps(i[k])
        except Exception:
            pass
        else:
            printable[k]=i[k]

    # Dump
    out(json.dumps(printable, indent=2))

    return {'return':0}
##############################################################################
# Universal print of unicode error string in utf8 that supports Python 2.x and 3.x to stderr
#
# TARGET: end users
def eout(s):
    """
    Universal print of a unicode string in utf8 to stderr, supporting
    both Python 2.x and 3.x. Honors the global 'allow_print' flag and
    the 'con_encoding' console-encoding override.

    Input:  s - unicode string to print
    Output: None
    """
    if allow_print:
        # NOTE(review): console encoding is taken from sys.stdin (not stderr);
        # presumably both match in an interactive console - confirm.
        if con_encoding=='':
            x=sys.stdin.encoding
            if x==None:
                b=s.encode()
            else:
                b=s.encode(x, 'ignore')
        else:
            b=s.encode(con_encoding, 'ignore')

        if sys.version_info[0]>2:
            try: # We encountered issues on ipython with Anaconda
                 # and hence made this work around
                sys.stderr.buffer.write(b)
                sys.stderr.buffer.write(b'\n')
            except Exception as e:
                # Fall back to writing the original string
                sys.stderr.write(s)
                pass
        else:
            sys.stderr.write(b)

        sys.stderr.flush()

    return None
##############################################################################
# Universal error print and exit
#
# TARGET: end users
def err(r):
    """
    Print the error text from a CK result dictionary and terminate
    the process with the dictionary's return code.

    Input: {
              return - return code
              error  - error text
           }

    Output: Nothing; quits program
    """
    import sys

    exit_code=r['return']
    message=r['error']

    out('Error: '+message)
    sys.exit(exit_code)
##############################################################################
# Universal error print for Jupyter Notebook with raise KeyboardInterrupt
#
# TARGET: end users
def jerr(r):
    """
    Print the error text from a CK result dictionary and raise
    KeyboardInterrupt (useful in Jupyter notebooks, where sys.exit
    would kill the whole kernel).

    Input: {
              return - return code
              error  - error text
           }

    Output: Nothing; quits program
    """
    exit_code=r['return']   # accessed first, mirroring err(); KeyError if malformed
    message=r['error']

    out('Error: '+message)

    raise KeyboardInterrupt
##############################################################################
# Support function for safe float (useful for sorting function)
#
# TARGET: end users
def safe_float(i,d):
    """
    Convert i to float; on any conversion failure return the fallback d.
    (Useful as a sorting key helper.)
    """
    try:
        return float(i)
    except Exception:
        return d
##############################################################################
# Support function for safe int (useful for sorting function)
#
# TARGET: end users
def safe_int(i,d):
    """
    Convert i to int; on any conversion failure return the fallback d.
    (Useful as a sorting key helper.)
    """
    try:
        return int(i)
    except Exception:
        return d
##############################################################################
# Support function to get value from list without error if out of bounds
# (useful for various sorting)
#
# TARGET: end users
def safe_get_val_from_list(lst, index, default_value):
    """
    Return lst[index] when index is below len(lst), otherwise default_value.
    (Useful for various sorting helpers.)
    """
    if index < len(lst):
        return lst[index]
    return default_value
##############################################################################
# Support function for system_with_timeout
#
# TARGET: end users
def system_with_timeout_kill(proc):
    """
    Kill a subprocess.Popen process together with its children.

    First tries psutil (handles whole process trees, which works better
    on Windows, but psutil may not be installed); if psutil is missing
    or fails, falls back to proc.terminate().

    Input:  proc - subprocess.Popen object
    Output: None
    """
    killed=False

    try: # pragma: no cover
        import psutil

        p=psutil.Process(proc.pid)

        # 'get_children' was renamed to 'children' in psutil 2.0 and the
        # old name was removed in 3.0 - support both
        get_children=getattr(p, 'children', None)
        if get_children is None:
            get_children=p.get_children

        for child in get_children(recursive=True):
            child.kill()

        p.kill()
        killed=True
    except Exception:
        pass

    # Try traditional way
    if not killed:
        try:
            proc.terminate()
        except Exception:
            pass

    return
##############################################################################
# Substituting os.system with possibility for time out
#
# TARGET: end users
def system_with_timeout(i):
    """
    Substitute for os.system with an optional timeout.

    Input: {
              cmd       - command line
              (timeout) - timeout in seconds (granularity 0.01 sec) - may cause overheads ...
           }

    Output: {
              return      - return code =  0, if successful
                                        >  0, if error
                                        =  8, if timeout
              (error)     - error text if return > 0
              return_code - return code from app
           }
    """
    import subprocess
    import time

    cmd=i['cmd']

    rc=0

    to=i.get('timeout','')

    p=subprocess.Popen(cmd, shell=True)

    if to != '':
        xto=float(to)

        t0=time.time()
        t=0

        # Poll until the process finishes or the timeout elapses
        while p.poll() is None and t<xto:
            time.sleep(0.1)
            t=time.time()-t0

        if t>=xto and p.poll() is None:
            system_with_timeout_kill(p)
            return {'return':8, 'error':'process timed out and had been terminated'}
    else:
        p.wait()

    rc=p.returncode
    return {'return':0, 'return_code':rc}
##############################################################################
# Run command and get stdout
#
# TARGET: end users
def run_and_get_stdout(i):
    """
    Run a command and capture its standard output and standard error.

    Input: {
              cmd     - list of command line arguments, starting with the command itself
                        (a plain string is split with shlex on non-Windows platforms)
              (shell) - if 'yes', reuse shell environment
           }

    Output: {
              return      - return code =  0, if successful
                                        >  0, if error
              (error)     - error text if return > 0
              return_code - return code from app
              stdout      - string, standard output of the command
              stderr      - string, standard error of the command
           }
    """
    import subprocess
    import shlex
    import platform

    cmd=i['cmd']
    if not isinstance(cmd, list):
        # Split only on non-Windows platforms (since Windows takes a string in Popen)
        if not platform.system().lower().startswith('win'):
            cmd=shlex.split(cmd)

    xshell=(i.get('shell','')=='yes')

    p1=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=xshell)
    output, error = p1.communicate()

    # communicate() returns bytes on Python 3 - decode to str
    if sys.version_info[0]>2:
        output = output.decode(encoding='UTF-8')
        error = error.decode(encoding='UTF-8')

    return {'return':0, 'return_code':p1.returncode, 'stdout':output, 'stderr':error}
##############################################################################
# Get value from one dict, remove it from there and move to another
#
# TARGET: end users
def get_from_dicts(dict1, key, default_value, dict2, extra=''):
    """
    Pop key from dict1 if present, mirroring the value into dict2 under
    extra+key; otherwise read extra+key from dict2; otherwise return
    default_value.

    Input: dict1         - first check in this dict (and remove if there)
           key           - key in dict1
           default_value - default value if not found
           dict2         - then check from here
           extra         - optional key prefix used for dict2

    Output: value
    """
    if key in dict1:
        value=dict1.pop(key)
        if dict2 is not None:
            dict2[extra+key]=value
        return value

    if dict2 is not None:
        return dict2.get(extra+key, default_value)

    return default_value
##############################################################################
# Converting iso text time to datetime object
#
# TARGET: end users
def convert_iso_time(i):
    """
    Convert an ISO text time to a datetime object, accepting progressively
    coarser precision (from microseconds down to just a year).

    Input: {
              iso_datetime - iso date time
           }

    Output: {
              return         - return code =  0, if successful
                                           >  0, if error
              (error)        - error text if return > 0
              (datetime_obj) - datetime object
           }
    """
    import datetime

    t=i['iso_datetime']

    # Try formats from most to least precise; the first that parses wins
    # (same order as the original hand-unrolled try-chain)
    formats=["%Y-%m-%dT%H:%M:%S.%f",
             "%Y-%m-%dT%H:%M:%S",
             "%Y-%m-%dT%H:%M",
             "%Y-%m-%dT%H",
             "%Y-%m-%d",
             "%Y-%m",
             "%Y"]

    for fmt in formats:
        try:
            dto=datetime.datetime.strptime(t, fmt)
        except Exception:
            continue
        return {'return':0, 'datetime_obj':dto}

    return {'return':1, 'error':'can\'t parse ISO date time: '+t}
##############################################################################
# Safe convert dict str keys to int to be sorted
#
# TARGET: end users
def convert_str_key_to_int(key):
    """
    Safely convert a dict string key to int for numeric sorting;
    non-numeric keys map to 0.
    """
    result=0
    try:
        result=int(key)
    except ValueError:
        pass
    return result
##############################################################################
# Universal input of unicode string in utf8 that supports Python 2.x and 3.x
#
# TARGET: end users
def inp(i):
    """
    Universal console input of a unicode string in utf8, supporting
    both Python 2.x and 3.x.

    Input: {
              text - text to print (prompt)
           }

    Output: {
              return - return code = 0
              string - input string
           }
    """
    t=i['text']

    # Encode the prompt using the console (or forced 'con_encoding') encoding
    if con_encoding=='':
        x=sys.stdin.encoding
        if x==None:
            b=t.encode()
        else:
            b=t.encode(x, 'ignore')
    else:
        b=t.encode(con_encoding, 'ignore') # pragma: no cover

    if sys.version_info[0]>2:
        # Python 3 input() expects a str prompt, so decode the bytes back
        try: b=b.decode(sys.stdin.encoding)
        except Exception as e:
            try: b=b.decode('utf8')
            except Exception as e: pass

    if sys.version_info[0]>2:
        s=input(b)
    else:
        # Python 2: read via raw_input and normalize to utf8 bytes
        x=sys.stdin.encoding
        if x==None:
            x='utf8'
        s=raw_input(b).decode(x).encode('utf8')

    return {'return':0, 'string':s}
##############################################################################
# Universal selector of dictionary entry
#
# TARGET: end users (advanced version available in module "choice")
def select(i):
    """
    Universal selector of a dictionary entry: prints a numbered menu on
    the console and reads the user's choice.

    Input: {
              dict             - dict with values being dicts with 'name' as string to display and 'sort' as int (for ordering)
              (title)          - print title
              (error_if_empty) - if 'yes' and Enter, make error
              (skip_sort)      - if 'yes', do not sort array
           }

    Output: {
              return - return code = 0
              string - selected dictionary key
           }
    """
    s=''

    title=i.get('title','')
    if title!='':
        out(title)
        out('')

    d=i['dict']

    # Order menu entries by their 'sort' weight unless disabled
    if i.get('skip_sort','')!='yes':
        kd=sorted(d, key=lambda v: d[v].get('sort',0))
    else:
        kd=d

    # Print the menu, remembering index-string -> key mapping
    j=0
    ks={}
    for k in kd:
        q=d[k]

        sj=str(j)
        ks[sj]=k

        qn=q.get('name','')

        out(sj+') '+qn)

        j+=1

    out('')
    rx=inp({'text':'Make your selection (or press Enter for 0): '})
    if rx['return']>0: return rx

    sx=rx['string'].strip()

    if sx=='':
        # Empty input defaults to the first entry (unless forbidden)
        if i.get('error_if_empty','')=='yes':
            return {'return':1, 'error':'selection is empty'}

        s=kd[0]
    else:
        if sx not in ks:
            return {'return':1, 'error':'selection is not recognized'}
        s=ks[sx]

    return {'return':0, 'string':s}
##############################################################################
# Universal UOA selector
#
# TARGET: end users (advanced version available in module "choice")
def select_uoa(i):
    """
    Universal UOA selector: prints a numbered list of search results and
    reads the user's choice from the console.

    Input: {
              choices      - list from search function
              (skip_enter) - if 'yes', do not select 0 when user presses Enter
              (skip_sort)  - if 'yes', do not sort array
           }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
              choice  - data UOA
           }
    """
    se=i.get('skip_enter','')

    lst=i.get('choices',[])

    # Order by data_uoa unless disabled
    if i.get('skip_sort','')!='yes':
        klst=sorted(lst, key=lambda v: v['data_uoa'])
    else:
        klst=lst

    # Print the menu, remembering index-string -> data_uid mapping
    zz={}
    iz=0
    for z1 in klst:
        z=z1['data_uid']
        zu=z1['data_uoa']

        zs=str(iz)
        zz[zs]=z

        out(zs+') '+zu+' ('+z+')')

        iz+=1

    out('')
    y='Select UOA'
    if se!='yes': y+=' (or press Enter for 0)'
    y+=': '

    rx=inp({'text':y})
    # NOTE(review): rx['return'] is not checked here, unlike in select()
    x=rx['string'].strip()

    if x=='' and se!='yes': x='0'

    if x not in zz:
        return {'return':1, 'error':'number is not recognized'}

    dduoa=zz[x]

    return {'return':0, 'choice':dduoa}
##############################################################################
# Convert string to list
#
# TARGET: end users
def convert_str_tags_to_list(i):
    """
    Normalize tags into a list.

    Input: either a list, or a string of comma-separated tags.

    Output: If i is a list, it's returned unchanged.
            If i is a string, the list of non-empty tags it represents
            (each tag stripped of leading and trailing whitespace).
    """
    if type(i)==list:
        return i

    stripped=(chunk.strip() for chunk in i.split(','))
    return [tag for tag in stripped if tag!='']
##############################################################################
# Check writing possibility
#
# TARGET: CK kernel and low-level developers
def check_writing(i):
    """
    Check whether a write/delete operation is allowed under the global
    and per-repo access-control flags in cfg.

    Input: {
              (module_uoa)
              (module_uid)
              (repo_uoa)
              (repo_uid)
              (repo_dict)
              (delete)     - if 'yes', check if global delete operation is allowed
           }

    Output: {
              return      - return code =  0, if successful
                                        >  0, if error
              (error)     - error text if return > 0
              (repo_dict) - repo cfg if available
           }
    """
    dl=i.get('delete','')

    # Global kill-switches first
    if dl=='yes' and cfg.get('forbid_global_delete','')=='yes':
        return {'return':1, 'error':'delete/rename operations are forbidden'}

    if cfg.get('forbid_global_writing','')=='yes':
        return {'return':1, 'error':'global writing is forbidden'}

    if len(i)==0:
        return {'return':0} # Check only global writing

    # Module-entry protection
    if cfg.get('forbid_writing_modules','')=='yes':
        muoa=i.get('module_uoa','')
        muid=i.get('module_uid','')
        if muoa==cfg['module_name'] or (muid!='' and muid in cfg['module_uids']):
            return {'return':1, 'error':'writing/changing modules is forbidden'}

    ruoa=i.get('repo_uoa','')
    ruid=i.get('repo_uid','')

    # Protection of the 'default' and 'local' repositories
    if cfg.get('forbid_writing_to_default_repo','')=='yes':
        if ruoa==cfg['repo_name_default'] or ruid==cfg['repo_uid_default']:
            return {'return':1, 'error':'writing to default repo is forbidden'}

    if cfg.get('forbid_writing_to_local_repo','')=='yes':
        if ruoa==cfg['repo_name_local'] or ruid==cfg['repo_uid_local']:
            return {'return':1, 'error':'writing to local repo is forbidden'}

    rr={'return':0}

    # Load info about repo (from input or from the repo cache)
    rd={}
    if ruoa!='':
        if 'repo_dict' in i:
            rd=i['repo_dict']
        else:
            rx=load_repo_info_from_cache({'repo_uoa':ruoa})
            if rx['return']>0: return rx
            rd=rx.get('dict',{})
        rr['repo_dict']=rd

    # Per-repo opt-in/opt-out flags from the repo's own config
    if cfg.get('allow_writing_only_to_allowed','')=='yes':
        if rd.get('allow_writing','')!='yes':
            return {'return':1, 'error':'writing to this repo is forbidden'}

    if rd.get('forbid_deleting','')=='yes' and dl=='yes':
        return {'return':1, 'error':'deleting in this repo is forbidden'}

    return rr
##############################################################################
# Get CK version
#
# TARGET: end users
def get_version(i):
    """
    Get CK version.

    Input:  {}

    Output: {
              return      - return code = 0
              version     - list starting from major version number
              version_str - version string
            }
    """
    import copy

    ver=copy.deepcopy(cfg['version'])

    # Join the version components into the usual dotted string
    ver_str='.'.join(str(component) for component in ver)

    return {'return':0, 'version':ver, 'version_str':ver_str}
##############################################################################
# Generate temporary files
#
# TARGET: end users
def gen_tmp_file(i):
    """
    Generate a temporary file name.

    Input: {
              (suffix)     - temp file suffix
              (prefix)     - temp file prefix
              (remove_dir) - if 'yes', strip the directory part and return only the base name
           }

    Output: {
              return    - return code =  0, if successful
                                      >  0, if error
              (error)   - error text if return > 0
              file_name - temp file name
           }
    """
    import tempfile

    xs=i.get('suffix','')
    xp=i.get('prefix','')

    # NOTE: the file is created and immediately removed, so only the name
    # (unique at creation time) is returned - reopening it later is racy.
    fd, fn=tempfile.mkstemp(suffix=xs, prefix=xp)
    os.close(fd)
    os.remove(fn)

    if i.get('remove_dir','')=='yes':
        fn=os.path.basename(fn)

    return {'return':0, 'file_name':fn}
##############################################################################
# Get host platform (currently win or linux) and OS bits
#
# TARGET: end users
def get_os_ck(i):
    """
    Get host platform (currently win or linux) and OS bits.

    Input: {
              (bits) - force OS bits
           }

    Output: {
              return      - return code = 0
              platform    - 'win' or 'linux'
              bits        - OS bits in string: 32 or 64
              python_bits - Python installation bits in string: 32 or 64
           }
    """
    import os
    import platform
    import struct

    # Pointer size of the Python interpreter -> Python build bitness
    pbits=str(8 * struct.calcsize("P"))

    plat='linux'
    if platform.system().lower().startswith('win'): # pragma: no cover
        plat='win'

    obits=i.get('bits','')
    if obits=='':
        obits='32'

        if plat=='win':
            # Trying to get fast way to detect bits
            if os.environ.get('ProgramW6432','')!='' or os.environ.get('ProgramFiles(x86)','')!='': # pragma: no cover
                obits='64'
        else:
            # On Linux use first getconf LONG_BIT and if doesn't work use python bits
            obits=pbits

            r=gen_tmp_file({})
            if r['return']>0: return r

            fn=r['file_name']

            cmd='getconf LONG_BIT > '+fn
            rx=os.system(cmd)

            if rx==0:
                r=load_text_file({'text_file':fn,
                                  'delete_after_read':'yes'})

                if r['return']==0:
                    s=r['string'].strip()
                    # Accept only a plausible short answer such as '32' or '64'
                    if len(s)>0 and len(s)<4:
                        obits=s

    return {'return':0, 'platform':plat, 'bits':obits, 'python_bits':pbits}
##############################################################################
# Generate CK UID
#
# TARGET: end users
def gen_uid(i):
    """
    Generate a CK UID: 16 hex characters taken from a random window
    of a uuid4 hex string.

    Input: {}

    Output: {
              return   - return code =  0, if successful
                                      >  0, if error
              (error)  - error text if return > 0
              data_uid - UID in string format (16 characters 0..9,a..f)
           }
    """
    import uuid
    import random

    uid=str(uuid.uuid4().hex)

    if len(uid)!=32:
        return {'return':1, 'error':'problem generating UID : len='+str(len(uid))+' !=32'} # pragma: no cover

    # Pick a random 16-character window inside the 32-character hex string.
    # (The original code had a bare 'random.seed' here - a no-op attribute
    # access; the global generator is already seeded on first use.)
    x=random.randrange(0,16)
    return {'return':0, 'data_uid':uid[x:x+16]}
##############################################################################
# Check if string is CK UID
#
# TARGET: end users
def is_uid(str):
    """
    Check if a string looks like a CK UID: exactly 16 characters, each a
    lowercase/uppercase hex digit (a dot is also tolerated by the pattern).

    Input:  string to check
    Output: True if UID, otherwise False
    """
    import re

    if len(str)!=16:
        return False

    # Any character outside [.a-f0-9] (case-insensitive) disqualifies it
    return re.search(r'[^\.a-f0-9]', str.lower()) is None
##############################################################################
# Check if string is correct CK UOA
# (i.e. does not have special characters including *, ?)
#
# TARGET: end users
def is_uoa(str):
    """
    Check if string is a correct CK UOA, i.e. it contains neither the
    CID-detection special characters from cfg nor the wildcards * and ?.

    Input:  string to check
    Output: True if allowed UOA, False otherwise
    """
    forbidden=(cfg['detect_cur_cid'], cfg['detect_cur_cid1'], '*', '?')

    for token in forbidden:
        if str.find(token)>=0:
            return False

    return True
##############################################################################
# Prepare special info about entry (engine used, author, date, etc)
#
# TARGET: CK kernel and low-level developers
def prepare_special_info_about_entry(i):
    """
    Prepare special info about entry (engine used, author, date, etc).

    Input: {
           }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
              dict    - dict with info
           }
    """
    # Add control info
    d={'engine':'CK',
       'version':cfg['version']}

    # Optional authorship/licensing fields, included only when configured in cfg
    if cfg.get('default_developer','')!='':
        d['author']=cfg['default_developer']

    if cfg.get('default_developer_email','')!='':
        d['author_email']=cfg['default_developer_email']

    if cfg.get('default_developer_webpage','')!='':
        d['author_webpage']=cfg['default_developer_webpage']

    if cfg.get('default_license','')!='':
        d['license']=cfg['default_license']

    if cfg.get('default_copyright','')!='':
        d['copyright']=cfg['default_copyright']

    # Timestamp the entry with the current ISO date/time
    r=get_current_date_time({})
    d['iso_datetime']=r['iso_datetime']

    return {'return':0, 'dict': d}
##############################################################################
# Convert string of a special format to json
#
# TARGET: end users
def convert_json_str_to_dict(i):
    """
    Convert a string of a special format to json.

    Input: {
              str                      - string (use ' instead of ", i.e. {'a':'b'}
                                         to avoid issues in CMD in Windows and Linux!)
              (skip_quote_replacement) - if 'yes', do not make above replacement
           }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
              dict    - dict from json file
           }
    """
    s=i['str']

    if i.get('skip_quote_replacement','')!='yes':
        s=s.replace('"', '\\"')
        s=s.replace('\'', '"')

    try:
        # The 'encoding' keyword was deprecated in Python 3.1 and removed
        # in Python 3.9, so pass it only on Python 2
        if sys.version_info[0]>2:
            d=json.loads(s)
        else:
            d=json.loads(s, encoding='utf8')
    except Exception as e:
        return {'return':1, 'error':'problem converting text to json ('+format(e)+')'}

    return {'return':0, 'dict': d}
##############################################################################
# Load json from file into dict
#
# TARGET: end users
def load_json_file(i):
    """
    Load json from file into dict.

    Input: {
              json_file - name of file with json
           }

    Output: {
              return  - return code =  0, if successful
                                    = 16, if file not found (may be warning)
                                    >  0, if error
              (error) - error text if return > 0
              dict    - dict from json file
           }
    """
    fn=i['json_file']

    # Open (Python 3 takes an explicit encoding; Python 2 open() does not)
    try:
        if sys.version_info[0]>2:
            f=open(fn, 'r', encoding='utf8')
        else:
            f=open(fn, 'r')
    except Exception as e:
        return {'return':16, 'error':'problem opening json file='+fn+' ('+format(e)+')'}

    # Read the whole file, always closing the handle
    try:
        try:
            s=f.read()
        except Exception as e:
            return {'return':1, 'error':'problem reading json file='+fn+' ('+format(e)+')'}
    finally:
        f.close()

    # Parse (the 'encoding' kwarg only exists on Python 2)
    try:
        if sys.version_info[0]>2:
            d=json.loads(s)
        else:
            d=json.loads(s, encoding='utf8')
    except Exception as e:
        return {'return':1, 'error':'problem parsing json from file='+fn+' ('+format(e)+')'}

    return {'return':0, 'dict': d}
##############################################################################
# Load YAML from file into dict
#
# TARGET: end users
def load_yaml_file(i):
    """
    Load YAML from file into dict.

    Input: {
              yaml_file - name of YAML file
           }

    Output: {
              return  - return code =  0, if successful
                                    = 16, if file not found (may be warning)
                                    >  0, if error
              (error) - error text if return > 0
              dict    - dict from YAML file
           }
    """
    import yaml

    fn=i['yaml_file']

    try:
        if sys.version_info[0]>2:
            f=open(fn, 'r', encoding='utf8')
        else:
            f=open(fn, 'r')
    except Exception as e:
        return {'return':16, 'error':'problem opening YAML file='+fn+' ('+format(e)+')'}

    try:
        s=f.read()
    except Exception as e:
        f.close()
        return {'return':1, 'error':'problem reading YAML file='+fn+' ('+format(e)+')'}

    f.close()

    try:
        # PyYAML >= 5.1 deprecates (and >= 6.0 forbids) yaml.load without an
        # explicit Loader; keep a fallback for very old PyYAML versions.
        # NOTE(review): FullLoader still constructs arbitrary Python-native
        # tags less aggressively than the legacy default, but YAML from
        # untrusted sources should use yaml.SafeLoader instead.
        loader=getattr(yaml, 'FullLoader', None)
        if loader is not None:
            d=yaml.load(s, Loader=loader)
        else:
            d=yaml.load(s)
    except Exception as e:
        return {'return':1, 'error':'problem parsing YAML from file='+fn+' ('+format(e)+')'}

    return {'return':0, 'dict': d}
##############################################################################
# Load text file into string
#
# TARGET: end users
def load_text_file(i):
    """
    Load a text file into a string (and optionally into a list or a dict).

    Input: {
              text_file           - name of text file
              (keep_as_bin)       - if 'yes', return only bin
              (encoding)          - by default 'utf8', however sometimes we use utf16
              (split_to_list)     - if 'yes', split to list
              (convert_to_dict)   - if 'yes', split to list and convert to dict
              (str_split)         - if !='', use as separator of keys/values when converting to dict
              (remove_quotes)     - if 'yes', remove quotes from values when converting to dict
              (delete_after_read) - if 'yes', delete file after read (useful when reading tmp files)
           }

    Output: {
              return   - return code =  0, if successful
                                     = 16, if file not found (may be warning)
                                     >  0, if error
              (error)  - error text if return > 0
              bin      - bin
              (string) - loaded text (with removed carriage returns)
              (lst)    - if split_to_list=='yes', return as list
              (dict)   - if convert_to_dict=='yes', return as dict
           }
    """
    fn=i['text_file']

    en=i.get('encoding','')
    if en=='' or en==None: en='utf8'

    # Read raw bytes first; decoding happens explicitly below
    try:
        f=open(fn, 'rb')
    except Exception as e:
        return {'return':16, 'error':'problem opening text file='+fn+' ('+format(e)+')'}

    try:
        b=f.read()
    except Exception as e:
        f.close()
        return {'return':1, 'error':'problem reading text file='+fn+' ('+format(e)+')'}

    f.close()

    r={'return':0, 'bin':b}

    if i.get('delete_after_read','')=='yes':
        import os
        os.remove(fn)

    if i.get('keep_as_bin','')!='yes':
        s=b.decode(en).replace('\r','') # decode into Python string (unicode in Python3)

        r['string']=s

        cl=i.get('split_to_list','')
        cd=i.get('convert_to_dict','')

        if cl=='yes' or cd=='yes':
            lst=s.split('\n')
            r['lst']=lst

            if cd=='yes':
                dd={}

                ss=i.get('str_split','')
                rq=i.get('remove_quotes','')
                if ss=='': ss=':'

                # Parse lines of the form "key<sep>value" into a dict;
                # lines without the separator (or starting with it) are skipped
                for q in lst:
                    qq=q.strip()
                    ix=qq.find(ss)
                    if ix>0:
                        k=qq[0:ix].strip()
                        v=''
                        if ix+1<len(qq):
                            v=qq[ix+1:].strip()
                        if v!='' and rq=='yes':
                            # Strip one leading and one trailing double quote
                            if v.startswith('"'): v=v[1:]
                            if v.endswith('"'): v=v[:-1]
                        dd[k]=v

                r['dict']=dd

    return r
##############################################################################
# Substitute string in file
#
# TARGET: end users
def substitute_str_in_file(i):
    """Replace every occurrence of one string by another inside a text file.

    Input:  {
              filename - file to modify in place
              string1  - string to be replaced
              string2  - replacement string
            }

    Output: {
              return  - return code =  0, if successful
                                    = 16, if file not found
                                    >  0, if error
              (error) - error text if return > 0
            }
    """

    fname=i['filename']
    old=i['string1']
    new=i['string2']

    # Load file as unicode text
    rl=load_text_file({'text_file':fname})
    if rl['return']>0: return rl

    # Perform substitution and write the result back (unicode)
    updated=rl['string'].replace(old, new)

    rs=save_text_file({'text_file':fname, 'string':updated})
    if rs['return']>0: return rs

    return {'return':0}
##############################################################################
# Deprecated: Dump json to string (left for compatibility with older kernel - should eventually remove it) - see 'dump_json'
#
# TARGET: end users
def dumps_json(i):
    """Deprecated alias of dump_json() - kept for compatibility with older kernels.

    Input:  {
              dict          - dictionary to serialize
              (skip_indent) - if 'yes', skip indent
              (sort_keys)   - if 'yes', sort keys
            }

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              string - json string (in utf8)
            }
    """

    # Simply forward to the current implementation
    return dump_json(i)
##############################################################################
# Dump json to string
#
# TARGET: end users
def dump_json(i):
    """Serialize a dictionary to a JSON string.

    Input:  {
              dict          - dictionary
              (skip_indent) - if 'yes', produce compact output (no indent)
              (sort_keys)   - if 'yes', sort keys
            }

    Output: {
              return - return code = 0, if successful
                                   > 0, if error
              string - json string (in utf8)
            }
    """

    data=i['dict']

    # Assemble json.dumps keyword arguments instead of duplicating calls
    kwargs={'ensure_ascii':False,
            'sort_keys':(i.get('sort_keys','')=='yes')}

    if i.get('skip_indent','')!='yes':
        kwargs['indent']=2

    if sys.version_info[0]<3:
        # Python 2 needs an explicit encoding
        kwargs['encoding']='utf8'

    try:
        txt=json.dumps(data, **kwargs)
    except Exception as e:
        return {'return':1, 'error':'problem converting dict to json ('+format(e)+')'}

    return {'return':0, 'string':txt}
##############################################################################
# Save dict as json file
#
# TARGET: end users
def save_json_to_file(i):
    """Serialize a dict to JSON and write it to a file.

    Input:  {
              json_file   - file name
              dict        - dict to save
              (sort_keys) - if 'yes', sort keys
              (safe)      - if 'yes', ignore non-JSON values (only for Debugging - changes original dict!)
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    fname=i['json_file']

    if i.get('safe','')=='yes':
        src=i['dict']

        # Keep only top-level keys whose values survive JSON serialization
        filtered={}
        for key in src:
            try:
                json.dumps(src[key])
            except Exception:
                pass
            else:
                filtered[key]=src[key]

        i['dict']=filtered

    rj=dumps_json(i)
    if rj['return']>0: return rj

    txt=rj['string'].replace('\r','')+'\n'

    return save_text_file({'text_file':fname, 'string':txt})
##############################################################################
# Save dict as yaml file
#
# TARGET: end users
def save_yaml_to_file(i):
    """Serialize a dict to YAML and write it to a file.

    Input:  {
              yaml_file - file name
              dict      - dict to save
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    import yaml

    target=i['yaml_file']

    try:
        # safe_dump (rather than plain dump) avoids pyyaml's unicode-key
        # warning and the unparsable output it produces
        text=yaml.safe_dump(i['dict'])
    except Exception as e:
        return {'return':1, 'error':'problem converting dict to YAML ('+format(e)+')'}

    return save_text_file({'text_file':target, 'string':text})
##############################################################################
# save string into text file
#
# TARGET: end users
def save_text_file(i):
    """Save a string into a text file (optionally appending).

    Input:  {
              text_file - name of text file
              string    - string to write (with removed \r)
              (append)  - if 'yes', append
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    fn=i['text_file']
    s=i['string']

    # Normalize line endings; the input may be str or bytes,
    # so try both replacements and ignore the one that fails
    try:
        s=s.replace('\r','')
    except Exception:
        pass

    try:
        s=s.replace(b'\r',b'')
    except Exception:
        pass

    m='w'
    if i.get('append','')=='yes': m='a'

    # Encode to utf8 bytes (no-op if already bytes - encode then fails)
    try:
        s=s.encode('utf8')
    except Exception:
        pass

    try:
        # Context manager guarantees the file is closed even if write fails
        with open(fn, m+'b') as f:
            f.write(s)
    except Exception as e:
        return {'return':1, 'error':'problem writing text file='+fn+' ('+format(e)+')'}

    return {'return':0}
##############################################################################
# Copy string to clipboard if supported by OS (requires Tk)
#
# TARGET: end users
def copy_to_clipboard(i): # pragma: no cover
    """Copy a string to the system clipboard (best effort).

    Tries pyperclip first; if it is not installed, falls back to
    Tkinter (Python 2 name) and then tkinter (Python 3 name).

    Input:  {
              string - string to copy
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    s=i['string']

    failed=False
    ee=''

    # Try to load pyperclip (seems to work fine on Windows)
    try:
        import pyperclip
    except Exception as e:
        ee=format(e)
        failed=True
        pass

    if not failed:
        # pyperclip handles the platform-specific clipboard access itself
        pyperclip.copy(s)
    else:
        failed=False

        # Try to load Tkinter (Python 2 module name)
        try:
            from Tkinter import Tk
        except ImportError as e:
            ee=format(e)
            failed=True
            pass

        if failed:
            failed=False
            # Fall back to the Python 3 module name
            try:
                from tkinter import Tk
            except ImportError as e:
                ee=format(e)
                failed=True
                pass

        if failed:
            return {'return':1, 'error':'none of pyperclip/Tkinter/tkinter packages is installed'}

        # Copy to clipboard via a hidden Tk root window
        try:
            r = Tk()
            r.withdraw()
            r.clipboard_clear()
            r.clipboard_append(s)
            r.destroy()
        except Exception as e:
            return {'return':1, 'error':'problem copying string to clipboard ('+format(e)+')'}

    return {'return':0}
##############################################################################
# Merge intelligently dict1 with dict2 key by key in contrast with dict1.update(dict2)
#
# TARGET: end users
def merge_dicts(i):
    """Recursively merge dict2 into dict1, key by key (dict1 is modified in place).

    In contrast with dict1.update(dict2): nested dicts are merged
    recursively, and list values are shallow-copied rather than shared.

    Input:  {
              dict1 - merge this dict with dict2 (will be directly modified!)
              dict2 - dict to merge in
            }

    Output: {
              return - return code = 0, if successful
              dict1  - output dict (same object as input dict1)
            }
    """

    dst=i['dict1']
    src=i['dict2']

    for key, value in src.items():
        if type(value) is dict:
            if key not in dst:
                dst[key]=value
            elif type(dst[key])==dict:
                # Both sides are dicts -> merge recursively
                merge_dicts({'dict1':dst[key], 'dict2':value})
            else:
                dst[key]=value
        elif type(value) is list:
            # Shallow copy so dst does not share the list object with src
            dst[key]=list(value)
        else:
            dst[key]=value

    return {'return':0, 'dict1':dst}
##############################################################################
# Convert file to upload string
#
# TARGET: end users
def convert_file_to_upload_string(i):
    """Read a file and encode it as a URL-safe base64 string.

    Input:  {
              filename - file name to convert
            }

    Output: {
              return              - return code = 0, if successful
                                                > 0, if error
              (error)             - error text if return > 0

              file_content_base64 - string that can be transmitted through Internet
            }
    """

    import base64

    fn=i['filename']

    if not os.path.isfile(fn):
        return {'return':1, 'error':'file '+fn+' not found'}

    chunks=[]
    try:
        # 'with' guarantees the file is closed even on a read error;
        # collecting chunks and joining once avoids quadratic bytes +=
        with open(fn, 'rb') as f:
            while True:
                x=f.read(32768)
                if not x: break
                chunks.append(x)
    except Exception as e:
        return {'return':1, 'error':'error reading file ('+format(e)+')'}

    s=base64.urlsafe_b64encode(b''.join(chunks)).decode('utf8')

    return {'return':0, 'file_content_base64': s}
##############################################################################
# Convert upload string to file
#
# TARGET: end users
def convert_upload_string_to_file(i):
    """Decode a URL-safe base64 upload string and write it to a file.

    Input:  {
              file_content_base64 - string transmitted through Internet
              (filename)          - file name to write (if empty, generate tmp file)
            }

    Output: {
              return       - return code = 0, if successful
                                         > 0, if error
              (error)      - error text if return > 0

              filename     - filename with full path
              filename_ext - filename extension
            }
    """

    import base64

    # str() keeps this working on both Python 2.x and 3.x,
    # since base64 operates on strings/bytes
    content=base64.urlsafe_b64decode(str(i['file_content_base64']))

    px=i.get('filename','')
    if px=='':
        rx=gen_tmp_file({'prefix':'tmp-'})
        if rx['return']>0: return rx
        px=rx['file_name']

    ext=os.path.splitext(px)[1]

    if os.path.isfile(px):
        return {'return':1, 'error':'file already exists in the current directory'}

    try:
        fx=open(px, 'wb')
        fx.write(content)
        fx.close()
    except Exception as e:
        return {'return':1, 'error':'problem writing file='+px+' ('+format(e)+')'}

    return {'return':0, 'filename':px, 'filename_ext': ext}
##############################################################################
# Input JSON from console (double enter to finish)
#
# TARGET: end users
def input_json(i):
    """Read JSON from the console (double Enter to finish) and parse it.

    Input:  {
              text - prompt text to print
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0

              string  - raw string that was entered
              dict    - parsed JSON
            }
    """

    out(i['text'])

    buf=''
    while True:
        rr=inp({'text':''})
        if rr['return']>0: return rr
        line=rr['string'].strip()
        if line=='': break
        buf+=line

    buf=buf.strip()

    if buf=='':
        buf='{}' # empty json
    else:
        # Tolerate missing outer braces
        if not buf.startswith('{'): buf='{'+buf
        if not buf.endswith('}'): buf+='}'

    rr=convert_json_str_to_dict({'str':buf, 'skip_quote_replacement':'yes'})
    if rr['return']>0: return rr

    return {'return':0, 'string': buf, 'dict':rr['dict']}
##############################################################################
# Convert CK list to CK dict with unicode in UTF-8 (unification of interfaces)
#
# TARGET: CK kernel and low-level developers
def convert_ck_list_to_dict(i):
    """Convert a CK command line (list of arguments) to the unified CK input dict.

    Input: [
             CK list: see 'action' function from this kernel
           ]

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0

              ck_dict - {
                          "action":action
                          "cid":module_uoa or CID (x means that it may not be really CID
                                and has to be processed specially
                          "cids":[cid1, cid2, cid3, ...]
                          "key1":value1
                          "key2":value2
                          ...
                          "key10":""
                          "key11":value11

                          keys/values from file_json; if file extension is .tmp,
                          it will be deleted after read!

                          keys/values from cmd_json

                          "unparsed":unparsed_cmd
                        }
            }
    """

    obj={}
    obj['cids']=[]

    l=len(i)
    if l>0: obj['action']=i[0]

    module_uoa_or_cid=''

    # Parsing
    cx=True # Start first processing CIDs and then turn it off when something else is encountered
            # NOTE(review): cx is written in every branch but never read below - confirm it is vestigial

    if l>1:
        for x in range(1, len(i)):
            p=i[x].rstrip()

            #####################################
            if p=='--':
                # Everything after a bare '--' is passed through unparsed
                cx=False

                p2=i[x+1:]
                obj['unparsed']=p2
                break

            #####################################
            elif p.startswith('--'):
                # Long option: --key or --key=value (value defaults to 'yes')
                cx=False

                p=p[2:]
                p1=p
                p2='yes'
                q=p.find("=")
                if q>0:
                    p1=p[0:q]
                    if len(p)>q:
                        p2=p[q+1:]
                obj[p1]=p2

            #####################################
            elif p.startswith('-'):
                # Short option: -key or -key=value (value defaults to 'yes')
                cx=False

                p=p[1:]
                p1=p
                p2='yes'
                q=p.find("=")
                if q>0:
                    p1=p[0:q]
                    if len(p)>q:
                        p2=p[q+1:]
                obj[p1]=p2

            #####################################
            elif p.startswith("@@@"):
                # Inline JSON string: @@@{...} is parsed and merged into the input dict
                cx=False
                jd=p[3:]
                if len(jd)<3:
                    return {'return':1, 'error':'can\'t parse command line option '+p}
                y=convert_json_str_to_dict({'str':jd})
                if y['return']>0: return y
                merge_dicts({'dict1':obj, 'dict2':y['dict']})

            #####################################
            elif p.startswith("@@"):
                # Interactive JSON input, merged into the input dict
                # (optionally under a given key: @@key)
                cx=False
                key=p[2:]

                # Reuses loop variable 'x' as the prompt text
                # (the index is re-assigned on the next iteration anyway)
                x='Add JSON to input'
                if key!='': x+=' for key "'+key+'"'
                x+=' (double Enter to stop):\n'

                rx=input_json({'text':x})
                if rx['return']>0: return rx
                dy=rx['dict']

                dx=obj
                if key!='':
                    if key not in obj: obj[key]={}
                    dx=obj[key]
                merge_dicts({'dict1':dx, 'dict2':dy})

            #####################################
            elif p.startswith("@"):
                # JSON file: @file.json is loaded and merged into the input dict;
                # .tmp files are deleted after reading
                cx=False
                name=p[1:]
                if len(name)<2:
                    return {'return':1, 'error':'can\'t parse command line option '+p}
                y=load_json_file({'json_file':name})
                if y['return']>0: return y
                if name.endswith('.tmp'):
                    os.remove(name)
                merge_dicts({'dict1':obj, 'dict2':y['dict']})

            #####################################
            elif p.find('=')>=0:
                # Plain key=value pair
                cx=False

                p1=p
                p2=''
                q=p.find("=")
                if q>0:
                    p1=p[0:q]
                    if len(p)>q:
                        p2=p[q+1:]
                obj[p1]=p2

            #####################################
            else:
                # If no module_uoa_or_cid -> set it
                if module_uoa_or_cid=='':
                    module_uoa_or_cid=p
                else:
                    # Otherwise add to CIDs
                    obj['cids'].append(p)

    if module_uoa_or_cid!='': obj['cid']=module_uoa_or_cid

    return {'return':0, 'ck_dict':obj}
##############################################################################
# Init CK (current instance - has state!)
#
# TARGET: internal use
def init(i): # pragma: no cover
    """Initialize the CK kernel (stateful - runs only once per process).

    Locates the default repo, the user repo directory and the local repo,
    fills the global 'work' dict with derived paths, bootstraps the repo
    cache files and loads the optional kernel configuration entry.

    Input:  {}

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    global cfg, work, initialized, paths_repos, type_long, string_io

    if initialized:
        return {'return':0}

    # Split version
    cfg['version']=__version__.split('.')

    # Default URL. FIXME: should be formed from wfe_host and wfe_port when they are known.
    # cfg['wfe_url_prefix'] = 'http://%s:%s/web?' % (cfg['default_host'], cfg['default_port'])

    # Check long/int types (Python 2 has 'long', Python 3 does not)
    try:
        x=long
    except Exception as e:
        type_long=int
    else:
        type_long=long

    # Import StringIO (module location differs between Python 2 and 3)
    if sys.version_info[0]>2:
        import io
        string_io=io.StringIO
    else:
        from StringIO import StringIO
        string_io=StringIO

    # Check where are repos (to keep compatibility with past CK < V1.5)
    p=''

    import inspect
    pxx=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    px=os.path.dirname(pxx)
    py=os.path.join(pxx, cfg['subdir_default_repo'])
    if os.path.isdir(py):
        p=py

    if p=='':
        # Try the python site-packages install location
        from distutils.sysconfig import get_python_lib
        px=get_python_lib()
        py=os.path.join(px, cfg['kernel_dir'], cfg['subdir_default_repo'])
        if os.path.isdir(py):
            p=py

    if p=='':
        # Last resort: scan all site-packages directories
        import site
        for px in site.getsitepackages():
            py=os.path.join(px, cfg['kernel_dir'],cfg['subdir_default_repo'])
            if os.path.isdir(py):
                p=py
                break

    # Check CK_ROOT environment variable
    s=os.environ.get(cfg['env_key_root'],'').strip()
    if s!='':
        work['env_root']=os.path.realpath(s)

        for px in cfg['kernel_dirs']:
            py=os.path.join(work['env_root'], px, cfg['subdir_default_repo'])
            if os.path.isdir(py):
                p=py
                break
    elif px!='':
        # NOTE(review): px here is whatever the searches above left behind -
        # presumably the CK install dir; confirm before relying on it
        work['env_root']=px

    if p=='':
        return {'return':1, 'error':'Internal CK error (can\'t find default repo) - please report to authors'}

    # Check default repo
    work['dir_default_repo']=p
    work['dir_default_repo_path']=os.path.join(work['dir_default_repo'], cfg['module_repo_name'], cfg['repo_name_default'])
    work['dir_default_kernel']=os.path.join(work['dir_default_repo'], cfg['subdir_kernel'])
    work['dir_default_cfg']=os.path.join(work['dir_default_kernel'], cfg['subdir_kernel_default'], cfg['subdir_ck_ext'], cfg['file_meta'])

    # Until the local repo is found below, the default repo is the work repo
    work['dir_work_repo']=work['dir_default_repo']
    work['dir_work_repo_path']=work['dir_default_repo_path']
    work['dir_work_kernel']=work['dir_default_kernel']
    work['dir_work_cfg']=work['dir_default_cfg']

    work['repo_name_work']=cfg['repo_name_default']
    work['repo_uid_work']=cfg['repo_uid_default']

    # Check external repos
    rps=os.environ.get(cfg['env_key_repos'],'').strip()
    if rps=='':
        # Get home user directory
        from os.path import expanduser
        home = expanduser("~")

        # In the original version, if path to repos was not defined, I was using CK path,
        # however, when installed as root, it will fail
        # rps=os.path.join(work['env_root'],cfg['subdir_default_repos'])
        # hence I changed to <user home dir>/CK
        rps=os.path.join(home, cfg['user_home_dir_ext'])

    if not os.path.isdir(rps):
        os.makedirs(rps)
    work['dir_repos']=rps

    # Check CK_LOCAL_REPO environment variable - if doesn't exist, create in user space
    s=os.environ.get(cfg['env_key_local_repo'],'').strip()
    if s=='':
        # Set up local default repository
        s=os.path.join(rps, cfg['repo_name_local'])
        if not os.path.isdir(s):
            os.makedirs(s)

            # Create description
            rq=save_json_to_file({'json_file':os.path.join(s,cfg['repo_file']),
                                  'dict':{'data_alias':cfg['repo_name_local'],
                                          'data_uoa':cfg['repo_name_local'],
                                          'data_name':cfg['repo_name_local'],
                                          'data_uid':cfg['repo_uid_local']},
                                  'sort_keys':'yes'})
            if rq['return']>0: return rq

    if s!='':
        work['local_kernel_uoa']=cfg['subdir_kernel_default']
        x=os.environ.get(cfg['env_key_local_kernel_uoa'],'').strip()
        if x!='': work['local_kernel_uoa']=x

        work['dir_local_repo']=os.path.realpath(s)
        work['dir_local_repo_path']=os.path.join(work['dir_local_repo'], cfg['module_repo_name'], cfg['repo_name_local'])
        work['dir_local_kernel']=os.path.join(work['dir_local_repo'], cfg['subdir_kernel'])
        work['dir_local_cfg']=os.path.join(work['dir_local_kernel'], work['local_kernel_uoa'], cfg['subdir_ck_ext'], cfg['file_meta'])

        # Update work repo!
        work['dir_work_repo']=work['dir_local_repo']
        work['dir_work_repo_path']=work['dir_local_repo_path']
        work['dir_work_kernel']=work['dir_local_kernel']
        work['dir_work_cfg']=work['dir_local_cfg']

        work['repo_name_work']=cfg['repo_name_local']
        work['repo_uid_work']=cfg['repo_uid_local']

        paths_repos.append({'path':work['dir_local_repo'],
                            'repo_uoa':cfg['repo_name_local'],
                            'repo_uid':cfg['repo_uid_local'],
                            'repo_alias':cfg['repo_name_local']})

    paths_repos.append({'path':work['dir_default_repo'],
                        'repo_uoa':cfg['repo_name_default'],
                        'repo_uid':cfg['repo_uid_default'],
                        'repo_alias':cfg['repo_name_default']})

    # Prepare repo cache
    work['dir_cache_repo_uoa']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_uoa'])
    work['dir_cache_repo_info']=os.path.join(work['dir_work_repo'],cfg['file_cache_repo_info'])

    # Check if first time and then copy local cache files (with remote-ck)
    if not os.path.isfile(work['dir_cache_repo_uoa']) and not os.path.isfile(work['dir_cache_repo_info']):
        rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_uoa'])})
        if rx['return']>0: return rx
        x1=rx['string']

        rx=load_text_file({'text_file':os.path.join(work['dir_default_repo'],cfg['file_cache_repo_info'])})
        if rx['return']>0: return rx
        x2=rx['string']

        rx=save_text_file({'text_file':work['dir_cache_repo_info'], 'string':x2})
        if rx['return']>0: return rx

        rx=save_text_file({'text_file':work['dir_cache_repo_uoa'], 'string':x1})
        if rx['return']>0: return rx

    # Check if local configuration exists, and if not, create it
    if not os.path.isfile(work['dir_local_cfg']):
        # Create empty local configuration
        rx=add({'repo_uoa':cfg['repo_name_local'],
                'module_uoa':cfg['subdir_kernel'],
                'data_uoa':work['local_kernel_uoa']})
        if rx['return']>0:
            return {'return':rx['return'],
                    'error':'can\'t create local configuration entry'}

    # Read kernel configuration (if exists)
    if os.path.isfile(work['dir_work_cfg']):
        r=load_json_file({'json_file':work['dir_work_cfg']})
        if r['return']>0: return r
        cfg1=r['dict']

        # Update cfg
        r=merge_dicts({'dict1':cfg, 'dict2':cfg1})
        if r['return']>0: return r

    initialized=True

    return {'return':0}
##############################################################################
# List all files recursively in a given directory
#
# TARGET: CK kernel and low-level developers
def list_all_files(i):
    """Recursively list all files under a directory.

    Input:  {
              path              - top level path
              (file_name)       - search for a specific file name
              (pattern)         - return only files matching this fnmatch pattern
              (path_ext)        - path extension (needed for recursion)
              (limit)           - limit number of files (if directories with a large number of files)
              (number)          - current number of files
              (all)             - if 'yes' do not ignore special directories (like .cm)
              (ignore_names)    - list of names to ignore
              (ignore_symb_dirs)- if 'yes', ignore symbolically linked dirs
                                  (to avoid recursion such as in LLVM)
              (add_path)        - if 'yes', add path
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0

              list    - dictionary of all files:
                        {"file_with_full_path":{"size":.., "path":..}
              number  - number of files in a current directory (needed for recursion)
            }
    """

    num=0
    if i.get('number','')!='':
        num=int(i['number'])

    skip_names=i.get('ignore_names',[])
    target=i.get('file_name','')

    max_files=-1
    if i.get('limit','')!='':
        max_files=int(i['limit'])

    res={}

    show_all=i.get('all','')

    rel=''
    if i.get('path_ext','')!='':
        rel=i['path_ext']

    top=i.get('path','')
    if sys.version_info[0]<3: top=unicode(top)

    pat=i.get('pattern','')
    if pat!='':
        import fnmatch

    no_sym_flag=i.get('ignore_symb_dirs','')
    no_sym=(no_sym_flag=='yes')

    with_path=i.get('add_path','')

    try:
        entries=os.listdir(top)
    except Exception as e:
        # Unreadable directory -> behave as if it were empty
        entries=[]

    for name in entries:
        full=os.path.join(top, name)

        # Skip special CK directories unless 'all' is requested
        if show_all!='yes' and name in cfg['special_directories']:
            continue
        if len(skip_names)>0 and name in skip_names:
            continue

        if os.path.isdir(full):
            # Optionally skip symlinked dirs to avoid infinite recursion
            if not no_sym or os.path.realpath(full)==full:
                rr=list_all_files({'path':full, 'all':show_all, 'path_ext':os.path.join(rel, name),
                                   'number':str(num), 'ignore_names':skip_names, 'pattern':pat,
                                   'file_name':target, 'ignore_symb_dirs':no_sym_flag,
                                   'add_path':with_path, 'limit': max_files})
                if rr['return']>0: return rr
                res.update(rr['list'])
        else:
            wanted=(target=='' or target==name) and \
                   (pat=='' or fnmatch.fnmatch(name, pat))
            if wanted and os.path.isfile(full):
                entry={'size':os.stat(full).st_size}
                if with_path=='yes': entry['path']=top
                res[os.path.join(rel, name)]=entry

        num=len(res)
        if max_files!=-1 and num>=max_files:
            break

    return {'return':0, 'list':res, 'number':str(num)}
##############################################################################
# Reload repo cache
#
# TARGET: CK kernel and low-level developers
def reload_repo_cache(i):
    """(Re)load the repo cache into the module-level globals.

    Input:  {
              (force) - if 'yes', force recaching
            }

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    global cache_repo_uoa, cache_repo_info, paths_repos_all, cache_repo_init

    if i.get('force','')=='yes': # pragma: no cover
        cache_repo_init=False
        paths_repos_all=[]

    if cache_repo_init:
        # Already cached - nothing to do
        return {'return':0}

    # Load repo UOA -> UID disambiguator (missing file, code 16, is fine)
    rr=load_json_file({'json_file':work['dir_cache_repo_uoa']})
    if rr['return']!=16 and rr['return']>0: return rr
    cache_repo_uoa=rr.get('dict',{})

    # Load cached repo info
    rr=load_json_file({'json_file':work['dir_cache_repo_info']})
    if rr['return']!=16 and rr['return']>0: return rr
    cache_repo_info=rr.get('dict',{})

    # Prepare all paths
    for uid in cache_repo_info:
        entry=cache_repo_info[uid]
        path=entry['dict'].get('path','')
        if path!='':
            paths_repos_all.append({'path':os.path.normpath(path),
                                    'repo_uoa':entry['data_uoa'],
                                    'repo_uid':entry['data_uid'],
                                    'repo_alias':entry['data_alias']})

    cache_repo_init=True

    return {'return':0}
##############################################################################
# Save repo cache
#
# TARGET: CK kernel and low-level developers
def save_repo_cache(i):
    """Write both repo cache dicts back to their JSON files.

    Input:  {}

    Output: {
              return  - return code = 0, if successful
                                    > 0, if error
              (error) - error text if return > 0
            }
    """

    # Persist the UOA disambiguator and the repo info cache
    for fname, data in ((work['dir_cache_repo_uoa'], cache_repo_uoa),
                        (work['dir_cache_repo_info'], cache_repo_info)):
        rr=save_json_to_file({'json_file':fname, 'dict':data})
        if rr['return']>0: return rr

    return {'return':0}
##############################################################################
# Load repo from cache
#
# TARGET: CK kernel and low-level developers
def load_repo_info_from_cache(i):
    """Load repository info from the cache (default/local repos are synthesized).

    Input:  {
              repo_uoa - repo UOA
            }

    Output: {
              return     - return code =  0, if successful
                                       = 16, if repo not found (may be warning)
                                       >  0, if error
              (error)    - error text if return > 0

              repo_uoa   - repo UOA
              repo_uid   - repo UID
              repo_alias - repo alias
              all other info from repo dict
            }
    """

    ruoa=i['repo_uoa']
    ruid=ruoa

    if ruoa in (cfg['repo_name_default'], cfg['repo_uid_default']):
        # Built-in default repo - no cache lookup needed
        d={"path_to_repo_desc": work['dir_default_repo_path'],
           "data_uid": cfg['repo_uid_default'],
           "data_alias": cfg['repo_name_default'],
           "data_uoa": cfg['repo_name_default'],
           "dict": {"default":"yes"}}
    elif ruoa in (cfg['repo_name_local'], cfg['repo_uid_local']):
        # Built-in local repo - no cache lookup needed
        d={"path_to_repo_desc": work['dir_local_repo_path'],
           "data_uid": cfg['repo_uid_local'],
           "data_alias": cfg['repo_name_local'],
           "data_uoa": cfg['repo_name_local'],
           "dict": {"default":"yes"}}
    else:
        rr=reload_repo_cache({}) # Ignore errors
        if rr['return']>0: return rr

        if not is_uid(ruoa):
            ruid=cache_repo_uoa.get(ruoa,'')
            if ruid=='':
                return {'return':1, 'error':'repository "'+ruoa+'" is not found in the cache. Check if repository exists or try "ck recache repo"'}

        d=cache_repo_info.get(ruid,{})
        if len(d)==0:
            return {'return':1, 'error':'repository is not found in the cache'}

    result={'return':0}
    result.update(d)

    return result
##############################################################################
# Find repo by path
#
# TARGET: CK kernel and low-level developers
def find_repo_by_path(i):
    """Find a repository (UOA/UID/alias) by its filesystem path.

    Input:  {
              path - path to repo
            }

    Output: {
              return     - return code =  0, if successful
                                       = 16, if repo not found (may be warning)
                                       >  0, if error
              (error)    - error text if return > 0

              repo_uoa   - repo UOA
              repo_uid   - repo UID
              repo_alias - repo alias
            }
    """

    path=i['path']
    if path!='': path=os.path.normpath(path)

    # Built-in repos first
    if path==work['dir_default_repo']:
        uoa=cfg['repo_name_default']
        return {'return':0, 'repo_uoa': uoa, 'repo_uid': cfg['repo_uid_default'], 'repo_alias':uoa}

    if path==work['dir_local_repo']:
        uoa=cfg['repo_name_local']
        return {'return':0, 'repo_uoa': uoa, 'repo_uid': cfg['repo_uid_local'], 'repo_alias':uoa}

    # Otherwise scan the cached repos
    rr=reload_repo_cache({}) # Ignore errors
    if rr['return']>0: return rr

    for uid in cache_repo_info:
        entry=cache_repo_info[uid]
        if path==entry['dict'].get('path',''):
            uoa=entry['data_uoa']
            alias=entry['data_uid'] if is_uid(uoa) else uoa
            return {'return':0, 'repo_uoa': uoa, 'repo_uid': entry['data_uid'], 'repo_alias':alias}

    return {'return':16, 'error': 'repository not found in this path'}
##############################################################################
# Find path to a given repo
#
# TARGET: end users
def find_path_to_repo(i):
    """Find the filesystem path to a given repo.

    Input:  {
              (repo_uoa) - repo UOA; if empty, get the default (work) repo
            }

    Output: {
              return     - return code =  0, if successful
                                       = 16, if repo not found (may be warning)
                                       >  0, if error
              (error)    - error text if return > 0

              dict       - dict from cache
              path       - path to repo
              repo_uoa   - repo UOA
              repo_uid   - repo UID
              repo_alias - repo alias
            }
    """

    a=i.get('repo_uoa','')

    if a=='':
        # No repo given -> use the current work repo
        return {'return':0, 'path':work['dir_work_repo'],
                'repo_uoa':work['repo_name_work'], 'repo_uid':work['repo_uid_work'],
                'repo_alias':work['repo_name_work'], 'dict':{}}

    if a in (cfg['repo_name_default'], cfg['repo_uid_default']):
        return {'return':0, 'path':work['dir_default_repo'],
                'repo_uoa':cfg['repo_name_default'], 'repo_uid':cfg['repo_uid_default'],
                'repo_alias':cfg['repo_name_default'], 'dict':{}}

    if a in (cfg['repo_name_local'], cfg['repo_uid_local']):
        return {'return':0, 'path':work['dir_local_repo'],
                'repo_uoa':cfg['repo_name_local'], 'repo_uid':cfg['repo_uid_local'],
                'repo_alias':cfg['repo_name_local'], 'dict':{}}

    # Reload cache if not initialized
    rr=reload_repo_cache({}) # Ignore errors
    if rr['return']>0: return rr

    ai=a
    if not is_uid(a):
        # Resolve alias to UID
        ai=cache_repo_uoa.get(a,'')
        if ai=='':
            return {'return':1, 'error':'repository "'+a+'" was not found in cache'}

    cri=cache_repo_info.get(ai, {})
    if len(cri)==0:
        return {'return':1, 'error':'repository "'+ai+'" was not found in cache'}

    dt=cri.get('dict',{})

    return {'return':0, 'path':dt.get('path',''),
            'repo_uoa':cri['data_uoa'], 'repo_uid':cri['data_uid'],
            'repo_alias':cri['data_alias'], 'dict':dt}
##############################################################################
# Find path to data (first search in default repo, then local one and then all other repos)
#
# TARGET: CK kernel and low-level developers
def find_path_to_data(i):
    """Find path to data (first search in default repo, then local one and then all other repos).

    Input:  {
              (repo_uoa) - repo UOA
              module_uoa - module UOA
              data_uoa   - data UOA
            }

    Output: {
              return       - return code =  0, if successful
                                         = 16, if data not found (may be warning)
                                         >  0, if error
              (error)      - error text if return > 0

              path         - path to data
              path_module  - path to module entry with this entry
              path_repo    - path to the repository of this entry
              repo_uoa     - repo UOA
              repo_uid     - repo UID
              repo_alias   - repo alias
              module_uoa   - module UOA
              module_uid   - module UID
              module_alias - module alias
              uoa          - data UOA
              uid          - data UID
              alias        - data alias
            }
    """

    muoa=i['module_uoa']
    muid='?'
    duoa=i['data_uoa']
    duid='?'

    ruoa=i.get('repo_uoa','')
    ruid=''
    ralias=''
    if ruoa!='':
        # Explicit repo given -> search only that repo (single pass)
        r=find_path_to_repo({'repo_uoa':ruoa})
        if r['return']>0: return r
        ps=[r]
        qmax=1
    else:
        # Pass 0: local+default repos; pass 1: all cached repos
        ps=paths_repos
        qmax=2

    # Search
    found=False
    pr=''
    pm=''
    pd=''
    for q in range(0,qmax):
        if found: break
        if q==1:
            # Check / reload all repos
            r=reload_repo_cache({}) # Ignore errors
            if r['return']>0: return r
            ps=paths_repos_all

        for prx in ps:
            pr=prx['path']
            ruoa=prx['repo_uoa']
            ruid=prx['repo_uid']
            ralias=prx['repo_alias']
            # First resolve the module entry inside this repo ...
            r=find_path_to_entry({'path':pr, 'data_uoa':muoa})
            if r['return']>0 and r['return']!=16: return r
            elif r['return']==0:
                muoa=r['data_uoa']
                muid=r['data_uid']
                malias=r['data_alias']
                pm=r['path']
                # ... then the data entry inside the module
                r1=find_path_to_entry({'path':pm, 'data_uoa':duoa})
                if r1['return']>0 and r1['return']!=16: return r1
                elif r1['return']==0:
                    found=True
                    pd=r1['path']
                    duoa=r1['data_uoa']
                    duid=r1['data_uid']
                    dalias=r1['data_alias']
                    break

        if found: break

    if not found:
        # Assemble human-readable description of what was not found
        s=''
        # if ruoa!='': s+=ruoa+':'
        s+=muoa+':'+duoa+'" ('
        if ruoa!='':
            # if ruid!='':s+=ruid+':'
            # else: s+='?:'
            s+='?:'
        s+=muid+':'+duid+')'
        return {'return':16, 'error':'can\'t find path to CK entry "'+s}

    # # Get info about repo
    # if ruid=='':
    #    r=find_repo_by_path({'path':pr})
    #    if r['return']>0: return r
    #    ruoa=r['repo_uoa']
    #    ruid=r['repo_uid']
    #    ralias=r['repo_alias']
    #    qmax=1

    return {'return':0, 'path':pd, 'path_module':pm, 'path_repo':pr,
            'repo_uoa':ruoa, 'repo_uid':ruid, 'repo_alias':ralias,
            'module_uoa':muoa, 'module_uid':muid, 'module_alias':malias,
            'data_uoa':duoa, 'data_uid':duid, 'data_alias':dalias}
##############################################################################
# Find path to an UOA entry (check UID or alias)
#
# TARGET: CK kernel and low-level developers
def find_path_to_entry(i):
    """Find path to an UOA entry inside a repository (checks UID or alias).

    Input:  {
              path     - path to a repository
              data_uoa - data UOA
            }

    Output: {
              return     - return code =  0, if successful
                                       = 16, if data not found (may be warning)
                                       = -1, if a UID was given and no entry/alias for it exists
                                       >  0, if error
              (error)    - error text if return > 0

              path       - path to data entry
              data_uid   - data uid (from UOA)
              data_alias - data alias (from UOA)
              data_uoa   - data alias or data uid, if data alias==''
            }
    """

    p=i['path']
    duoa=i['data_uoa']

    if duoa=='': # pragma: no cover
        raise Exception('data_uoa is empty')

    # Disambiguate UOA
    alias=''
    if is_uid(duoa):
        # If UID
        uid=duoa

        # Check if alias exists (one-line alias file named after the UID)
        p1=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_alias_u'] + uid)
        found_alias=False
        if os.path.isfile(p1):
            try:
                f=open(p1)
                alias=f.readline().strip()
                f.close()
                found_alias=True
            except Exception as e:
                # Unreadable alias file -> fall back to UID directory below
                None

        # If alias exists, check directory with alias
        if found_alias:
            p2=os.path.join(p, alias)
            return {'return':0, 'path':p2, 'data_uid':uid, 'data_alias':alias, 'data_uoa':alias}

        p2=os.path.join(p, uid)
        if os.path.isdir(p2):
            return {'return':0, 'path':p2, 'data_uid':uid, 'data_alias':'', 'data_uoa':uid}

        # NOTE: -1 (not 16) is returned here for a missing UID entry
        return {'return':-1}

    # If alias
    alias=duoa

    p1=os.path.join(p, alias)
    if sys.version_info[0]<3:
        # Python 2: paths with non-ASCII aliases need explicit encoding
        try: p1=p1.encode('utf8')
        except Exception as e: pass

    if os.path.isdir(p1):
        # Check uid for this alias (reverse alias file named after the alias)
        p2=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_alias_a'] + alias)
        try:
            f=open(p2)
            uid=f.readline().strip()
            f.close()
        except Exception as e:
            return {'return':10, 'error':'inconsistent entry: alias "'+alias+'" exists, but not the UID in file '+p2,
                    'path':p1, 'data_alias':alias}

        return {'return':0, 'path':p1, 'data_uid':uid, 'data_alias':alias, 'data_uoa':alias}

    return {'return':16, 'error':'can\'t find path to CK entry'}
##############################################################################
# Load meta description from a path
#
# TARGET: CK kernel and low-level developers
def load_meta_from_path(i):
    """Load meta description (and optional companion files) from a data entry path.

    Input:  {
              path           - path to a data entry
              (skip_updates) - if 'yes', do not load updates
              (skip_desc)    - if 'yes', do not load descriptions
            }

    Output: {
              return         - return code = 0, if successful
                                           > 0, if error
              (error)        - error text if return > 0

              dict           - dict with meta description
              path           - path to json file with meta description
              (info)         - dict with info if exists
              (path_info)    - path to json file with info
              (updates)      - dict with updates if exists
              (path_updates) - path to json file with updates
              (path_desc)    - path to json file with API description
            }
    """

    p=i['path']

    skip_updates=i.get('skip_updates','')
    skip_desc=i.get('skip_desc','')

    # Locate the meta file (falling back to the old cM name)
    pm=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_meta'])
    if not os.path.isfile(pm):
        pm=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_meta_old']) # For compatibility with cM
        if not os.path.isfile(pm):
            return {'return':1, 'error':'meta description is not found in path '+p}

    rr=load_json_file({'json_file':pm})
    if rr['return']>0: return rr

    out={'return':0, 'path':pm, 'dict':rr['dict']}

    # Optional companion files: (path key, dict key, file name, enabled)
    extras=(('path_info',    'info',    cfg['file_info'],    True),
            ('path_updates', 'updates', cfg['file_updates'], skip_updates!='yes'),
            ('path_desc',    'desc',    cfg['file_desc'],    skip_desc!='yes'))

    for key_path, key_dict, fname, enabled in extras:
        if not enabled: continue
        pf=os.path.join(p, cfg['subdir_ck_ext'], fname)
        if os.path.isfile(pf):
            rr=load_json_file({'json_file':pf})
            if rr['return']>0: return rr
            out[key_path]=pf
            out[key_dict]=rr['dict']

    return out
##############################################################################
# Load (CK) python module
#
# TARGET: end users
def load_module_from_path(i):
    """
    Load a (CK) python module from a given path, cache it by full path
    and modification time, and optionally call its init() function.

    Input: {
              path              - module path
              module_code_name  - module name
              (cfg)             - configuration of the module if exists ...
              (skip_init)       - if 'yes', skip init
              (data_uoa)        - module UOA (useful when printing error)
           }

    Output: {
              return   - return code =  0, if successful
                                     >  0, if error
              (error)  - error text if return > 0

              code     - python code object
              path     - full path to the module
              cuid     - internal UID of the module
           }
    """

    p=i['path']
    n=i['module_code_name']

    xcfg=i.get('cfg',None)

    # Find module
    try:
       x=imp.find_module(n, [p])
    except ImportError as e: # pragma: no cover
       return {'return':1, 'error':'can\'t find module code (path='+p+', name='+n+', err='+format(e)+')'}

    ff=x[0]
    full_path=x[1]

    # Check if code has been already loaded (and file not modified since)
    if full_path in work['cached_module_by_path'] and work['cached_module_by_path_last_modification'][full_path]==os.path.getmtime(full_path):
       ff.close()
       # Code already loaded
       return work['cached_module_by_path'][full_path]

    # Check if has dependency on specific CK kernel version
    if xcfg is not None:
       kd=xcfg.get('min_kernel_dep','')
       if kd!='':
          rx=check_version({'version':kd})
          if rx['return']>0: return rx

          ok=rx['ok']
          version_str=rx['current_version']
          if ok!='yes':
             return {'return':1, 'error':'module "'+i.get('data_uoa','')+'" requires minimal CK kernel version '+kd+' while your version is '+version_str}

    # Generate uid for the run-time extension of the loaded module
    # otherwise modules with the same extension (key.py for example)
    # will be reloaded ...
    r=gen_uid({})
    if r['return']>0: return r
    ruid='rt-'+r['data_uid']

    try:
       c=imp.load_module(ruid, ff, full_path, x[2])
    except ImportError as e: # pragma: no cover
       return {'return':1, 'error':'can\'t load module code (path='+p+', name='+n+', err='+format(e)+')'}
    finally:
       # FIX: close the file handle on both success and failure
       # (originally it leaked when imp.load_module raised)
       ff.close()

    # Initialize module with this CK instance
    c.ck=sys.modules[__name__]
    if xcfg is not None: c.cfg=xcfg

    # Initialize module
    if i.get('skip_init','')!='yes':
       # FIX: use a default so that modules without an init() function
       # are simply skipped instead of raising AttributeError
       if getattr(c, 'init', None) is not None:
          r=c.init(i)
          if r['return']>0: return r

    r={'return':0, 'code':c, 'path':full_path, 'cuid':ruid}

    # Cache code together with its time of change
    work['cached_module_by_path'][full_path]=r
    work['cached_module_by_path_last_modification'][full_path]=os.path.getmtime(full_path)

    return r
##############################################################################
# Perform remote action via CK web service
#
# TARGET: CK kernel and low-level developers
def perform_remote_action(i):
    """
    Forward an action to a remote CK web service over HTTP POST.

    The whole input dict 'i' is serialized to JSON and sent as the
    'ck_json' form variable. 'push' actions base64-encode a local file
    into the payload; 'pull' actions decode a file from the response.

    NOTE: this function mutates 'i' (deletes 'cid', 'remote_server_url',
    'remote_server_user', 'remote_server_pass'; rewrites 'out',
    'filename'); 'out' is restored to its original value before return.

    Input:  { See 'perform_action' function }
    Output: { See 'perform_action' function }
    """

    # Import modules compatible with Python 2.x and 3.x
    import urllib

    try: import urllib.request as urllib2
    except: import urllib2 # pragma: no cover

    try: from urllib.parse import urlencode
    except: from urllib import urlencode # pragma: no cover

    rr={'return':0}

    # Get action
    act=i.get('action','')

    # Check output
    o=i.get('out','')

    if o=='con':
#       out('Initiating remote access ...')
#       out('')
       i['out']='con'
       i['quiet']='yes'
       # 'pull' must transfer file content, so force structured output
       # even in console mode
       if act=='pull':
          i['out']='json'
    else:
       i['out']='json'

#    # Clean up input
#    if o!='json_file':
#       rr['out']='json' # Decided to return json to show that it's remote ...

    if 'cid' in i:
       del(i['cid']) # already processed

    # Get URL
    url=i.get('remote_server_url','')

    # Process i: the URL must not be forwarded to the remote side
    if 'remote_server_url' in i: del(i['remote_server_url'])

    # Pre process if push file ...
    if act=='push':
       # Check file: explicit 'filename' wins, otherwise fall back to
       # the first positional CID (treated as a file name)
       fn=i.get('filename','')
       if fn=='':
          x=i.get('cids',[])
          if len(x)>0:
             fn=x[0]

       if fn=='':
          return {'return':1, 'error':'filename is empty'}

       if not os.path.isfile(fn):
          return {'return':1, 'error':'file '+fn+' not found'}

       rx=convert_file_to_upload_string({'filename':fn})
       if rx['return']>0: return rx

       i['file_content_base64']=rx['file_content_base64']

       # Leave only filename without path
       i['filename']=os.path.basename(fn)

    # Prepare post variables
    # NOTE(review): 'remote_server_user'/'remote_server_pass' are still
    # present in 'i' at this point and are serialized into the payload;
    # they are only deleted from 'i' below, after serialization.
    # Confirm whether the remote side expects them in the JSON.
    r=dumps_json({'dict':i, 'skip_indent':'yes'})
    if r['return']>0: return r

    s=r['string'].encode('utf8')
    post=urlencode({'ck_json':s})
    if sys.version_info[0]>2: post=post.encode('utf8')

    # If auth: install an HTTP Basic Auth opener for this URL
    au=i.get('remote_server_user','')
    if au!='':
       del(i['remote_server_user'])

       ap=i.get('remote_server_pass','')
       if ap!='':
          del(i['remote_server_pass'])

       auth = urllib2.HTTPPasswordMgrWithDefaultRealm()
       auth.add_password(None, url, au, ap)
       urllib2.install_opener(urllib2.build_opener(urllib2.HTTPBasicAuthHandler(auth)))

    # Prepare request
    request = urllib2.Request(url, post)

    # Connect
    try:
       f=urllib2.urlopen(request)
    except Exception as e:
       return {'return':1, 'error':'Access to remote CK repository failed ('+format(e)+')'}

    # Read from Internet
    try:
       s=f.read()
       f.close()
    except Exception as e:
       return {'return':1, 'error':'Failed reading stream from remote CK web service ('+format(e)+')'}

    # Check output: best-effort decode; leave as bytes if not valid utf8
    try: s=s.decode('utf8')
    except Exception as e: pass

    if o=='con' and act!='pull':
       # Console mode: just print the raw remote output
       out(s.rstrip())
    else:
       # Try to convert output to dictionary
       r=convert_json_str_to_dict({'str':s, 'skip_quote_replacement':'yes'})
       if r['return']>0:
          return {'return':1, 'error':'can\'t parse output from remote CK server ('+r['error']+'):\n'+s[:256]+'\n\n...)'}

       d=r['dict']

       if 'return' in d: d['return']=int(d['return']) # Fix for some strange behavior when 'return' is not integer - should check why ...

       if d.get('return',0)>0:
          return d

       # Post process if pull file ...
       if act=='pull':
          if o!='json' and o!='json_file':
             # Convert encoded file to real file ...
             x=d.get('file_content_base64','')

             fn=d.get('filename','')
             if fn=='': fn=cfg['default_archive_name']

             r=convert_upload_string_to_file({'file_content_base64':x, 'filename':fn})
             if r['return']>0: return r

             if 'file_content_base64' in d: del(d['file_content_base64'])

       rr.update(d)

    # Restore original output
    i['out']=o

    return rr
##############################################################################
# Perform action (find module or use kernel)
#
# TARGET: CK kernel and low-level developers
def perform_action(i):
"""
Input: {
all parameters from function 'access'
(web) - if 'yes', called from the web
(common_func) - if 'yes', ignore search for modules
and call common func from the CK kernel
(local) - if 'yes', run locally even if remote repo ...
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(out) - if action change output, return it
Output from the module/action
}
"""
# Check action
action=i.get('action','')
if action=='':
action='short_help'
elif action=='-?' or action=='-h' or action=='--help':
action='help'
# Check web
wb=i.get('web','')
# Substitute # in CIDs
cid=i.get('cid','')
cids=i.get('cids',[])
out=i.get('out','')
need_subst=False
rc={} # If CID from current directory
if cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']):
need_subst=True
else:
for c in cids:
if c.startswith(cfg['detect_cur_cid']) or c.startswith(cfg['detect_cur_cid1']):
need_subst=True
break
# If need to substitute #, attempt to detect current CID
if need_subst:
rc=detect_cid_in_current_path({})
if rc['return']>0: return rc
# Process cid (module or CID)
module_uoa=cid
if cid.find(':')>=0 or cid.startswith(cfg['detect_cur_cid']) or cid.startswith(cfg['detect_cur_cid1']):
# Means that CID
r=parse_cid({'cid':cid, 'cur_cid':rc})
if r['return']>0: return r
module_uoa=r.get('module_uoa','')
duoa=r.get('data_uoa','')
if duoa!='': i['data_uoa']=duoa
ruoa=r.get('repo_uoa','')
if ruoa!='': i['repo_uoa']=ruoa
# If module_uoa exists in input, set module_uoa
if i.get('module_uoa','')!='': module_uoa=i['module_uoa']
i['module_uoa']=module_uoa
# Check if repo exists and possibly remote!
remote=False
local=i.get('local','')
rs=i.get('remote_server_url','')
if rs=='':
ruoa=i.get('repo_uoa','')
if ruoa!='' and ruoa.find('*')<0 and ruoa.find('?')<0:
rq=load_repo_info_from_cache({'repo_uoa':ruoa})
if rq['return']>0: return rq
dd=rq.get('dict',{})
if dd.get('remote','')=='yes' and local!='yes':
rs=dd.get('url','')
if rs=='':
return {'return':1, 'error':'URL of remote repository is not defined'}
i['remote_server_url']=rs
if dd.get('remote_user','')!='':
i['remote_server_user']=dd['remote_user']
# It is completely unsave - just for proof of concept ...
if dd.get('remote_password','')!='':
i['remote_server_pass']=dd['remote_password']
if dd.get('remote_repo_uoa','')!='':
i['repo_uoa']=dd['remote_repo_uoa']
else:
del (i['repo_uoa'])
if i.get('remote_repo_uoa','')!='':
i['repo_uoa']=i['remote_repo_uoa']
del(i['remote_repo_uoa'])
if rs!='' and local!='yes':
return perform_remote_action(i)
# Process and parse cids -> xcids
xcids=[]
for c in cids:
r=parse_cid({'cid':c, 'cur_cid':rc, 'ignore_error':'yes'}) # here we ignore errors, since can be a file name, etc
if r['return']>0: return r
xcids.append(r)
i['xcids']=xcids
# Check if common function
cf=i.get('common_func','')
# Check if no module_uoa, not common function, then try to get module from current
module_detected_from_dir=False
if not need_subst and cf!='yes' and module_uoa=='' and action not in cfg['common_actions']:
rc=detect_cid_in_current_path({})
if rc['return']==0:
module_uoa=rc.get('module_uoa','')
module_detected_from_dir=True