#!/usr/bin/env python3
#!/usr/bin/env pypy3
"""Format ontology files using a uniform ttl serializer from rdflib
Usage:
ttlfmt [options]
ttlfmt [options] <file>...
Options:
-h --help print this
-v --verbose do something fun!
-a --vanilla use the regular rdflib turtle serializer
-y --subclass use the subClassOf turtle serializer
-c --compact use the compact turtle serializer
-u --uncompact use the uncompact turtle serializer
-r --racket use the racket turtle serializer
-j --jsonld use the rdflib-jsonld serializer
-f --format=FM specify the input format (used for pipes)
-t --outfmt=F specify the output format [default: nifttl]
-s --slow do not use a process pool
-n --nowrite parse the file and reserialize it but do not write changes
-o --output=FI serialize all input files to output file
-p --profile enable profiling on parsing and serialization
-d --debug launch debugger after parsing and before serialization
"""
import os
import sys
from io import StringIO, TextIOWrapper
from json.decoder import JSONDecodeError
from concurrent.futures import ProcessPoolExecutor
from docopt import docopt, parse_defaults
import rdflib
from rdflib.plugins.parsers.notation3 import BadSyntax
from ttlser.utils import regjsonld

defaults = {o.name:o.value if o.argcount else None for o in parse_defaults(__doc__)}

#GRAPHCLASS = rdflib.Graph
GRAPHCLASS = rdflib.ConjunctiveGraph


def getVersion():
    ttlser = rdflib.plugin.get('nifttl', rdflib.serializer.Serializer)
    ttlser_version = ttlser._CustomTurtleSerializer__version  # FIXME
    version = "ttlfmt v0.0.2\nttlser {}".format(ttlser_version)
    return version


def prepare(filepath_or_stream, outpath=None, stream=False):
    """Normalize the input source, guess its format, and pick an output target."""
    if stream:
        infmt_guess = 'turtle'
        if outpath is None:
            outpath = sys.stdout
    else:
        filepath_or_stream = os.path.expanduser(filepath_or_stream)
        _, ext = os.path.splitext(filepath_or_stream)
        filetype = ext.strip('.')
        if filetype == 'ttl':
            infmt_guess = 'ttl'
        elif filetype in ('json', 'jsonld'):
            infmt_guess = 'json-ld'
            regjsonld()
        else:
            infmt_guess = None
        if outpath is None:
            outpath = filepath_or_stream  # by default the file is reserialized in place
        print(filepath_or_stream)
    return dict(source=filepath_or_stream,
                format_guess=infmt_guess,
                outpath=outpath)
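
# For illustration (hypothetical path): prepare('~/ont/foo.ttl') expands the path and
# returns roughly {'source': '.../ont/foo.ttl', 'format_guess': 'ttl', 'outpath': '.../ont/foo.ttl'},
# i.e. with no explicit outpath the file will be rewritten in place.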


formats = ('ttl', 'json-ld', None, 'xml', 'n3', 'nt', 'nquads', 'trix',
           'trig', 'hturtle', 'rdfa', 'mdata', 'rdfa1.0', 'html')


def parse(source, format_guess, outpath, graph=None, infmt=None, graph_class=GRAPHCLASS):
    """Parse source into a graph, trying the guessed format first and falling back to the rest."""
    graph = graph_class() if graph is None else graph
    errors = []
    if infmt:
        format_guess = infmt
    for format in (format_guess, *(f for f in formats if f != format_guess)):
        # TODO we don't need to reset, just save parsed streams to the point where they fail?
        if type(source) == TextIOWrapper:  # stdin can't reset
            src = source.read()
            source = StringIO(src)
        try:
            graph.parse(source=source, format=format)
            a = next(iter(graph))  # an empty graph means the parse almost certainly failed
            return graph, outpath
        except (StopIteration, BadSyntax, JSONDecodeError) as e:
            print('PARSING FAILED', format, source)
            if infmt:  # or format_guess != None:
                raise e
            errors.append(e)
            if type(source) == StringIO:
                source.seek(0)
    raise BadSyntax(str(errors)) from errors[0]
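
# For illustration (hypothetical file): parse(source='foo.jsonld', format_guess='json-ld', outpath='foo.jsonld')
# tries 'json-ld' first and then falls back through the remaining entries of `formats`
# until one of them yields at least one triple; if none do, the collected errors are re-raised.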


def serialize(graph, outpath, outfmt=defaults['--outfmt'],
              debug=False, profile=False, nowrite=False):
    """Serialize a parsed graph to outpath (a file path or sys.stdout) in outfmt."""
    if debug:
        if type(outpath) == type(sys.stdout):
            pipe_debug(graph=graph, outpath=outpath)
        else:
            breakpoint()
    elif profile and type(outpath) != type(sys.stdout):
        *_, _out = outpath.rsplit('/', 1)
        print('triple count for {_out}:'.format(_out=_out), len(graph))
    if outfmt == 'json-ld':
        kwargs = {'auto_compact': True}
    else:
        kwargs = {}
    # with an explicit encoding rdflib returns bytes, hence .buffer and 'wb' below
    out = graph.serialize(format=outfmt, encoding='utf-8', **kwargs)
    if nowrite:
        print('FILE NOT WRITTEN {}'.format(outpath))
        return
    if profile:
        print('PARSING Success', outpath)
    elif not isinstance(outpath, str):  # FIXME not a good test that it is stdout
        outpath.buffer.write(out)
    else:
        with open(outpath, 'wb') as f:
            f.write(out)


def convert(file_or_list_or_stream, outpath=None, stream=False,
            infmt=None, outfmt=defaults['--outfmt'],
            debug=False, profile=False, nowrite=False, graph_class=GRAPHCLASS):
    """Parse a single file, a list of files, or a stream and reserialize it."""
    if stream or type(file_or_list_or_stream) == str:
        file_or_stream = file_or_list_or_stream
        serialize(*parse(**prepare(file_or_stream, outpath, stream),
                         infmt=infmt),
                  outfmt=outfmt, debug=debug, profile=profile, nowrite=nowrite)
    else:
        # a file list is used here because it allows us to merge files
        # without any additional code if we pass in more than one file;
        # in normal use a tuple with a single element is passed in
        file_list = file_or_list_or_stream
        if outpath is not None:
            graph = graph_class()
            [parse(**prepare(file, outpath), graph=graph, infmt=infmt) for file in file_list]
            serialize(graph, outpath, outfmt=outfmt,
                      debug=debug, profile=profile, nowrite=nowrite)
        else:
            [convert(file, infmt=infmt, outfmt=outfmt,
                     debug=debug, profile=profile,
                     graph_class=graph_class) for file in file_list]
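
# Programmatic use, as a sketch (assumes this module is importable as ttlfmt and that
# the ttlser serializer plugins are registered, e.g. via an installed ttlser package):
#
#   from ttlfmt import convert
#   convert('a.ttl')                                   # reformat a single file in place
#   convert(('a.ttl', 'b.ttl'), outpath='merged.ttl')  # merge two files into one output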


def pipe_debug(*args, source=None, graph=None, outpath=None, **kwargs):
    # stdin was consumed by the pipe, so rebind it to the controlling terminal
    # (looked up from stdout) so that the interactive debugger can read input
    fn = sys.stdout.fileno()
    tty = os.ttyname(fn)
    with open(tty) as sys.stdin:
        breakpoint()


def main():
    #global args  # vastly preferable to classing everything since this way we can see
    #global outfmt  # in 2 lines what is actually shared instead of stuffed into self
    args = docopt(__doc__, version=getVersion())
    profile = args['--profile']
    if profile:
        try:
            from desc.prof import profile_me
            global parse
            global serialize
            parse = profile_me(parse)
            serialize = profile_me(serialize)
        except ImportError:
            pass

    nowrite = args['--nowrite']
    infmt = args['--format']
    debug = args['--debug']
    if args['--subclass']:
        outfmt = 'scottl'
    elif args['--vanilla']:
        outfmt = 'turtle'
    elif args['--compact']:
        outfmt = 'cmpttl'
    elif args['--uncompact']:
        outfmt = 'uncmpttl'
    elif args['--jsonld']:
        outfmt = 'json-ld'
    elif args['--racket']:
        outfmt = 'rktttl'
    else:
        outfmt = args['--outfmt']

    if outfmt == 'json-ld' or infmt == 'json-ld':
        regjsonld()

    outpath = args['--output']
    files = args['<file>']
    if not files:
        from ttlser.utils import readFromStdIn
        stdin = readFromStdIn(sys.stdin)
        if stdin is not None:
            convert(stdin, outpath, stream=True,
                    infmt=infmt, outfmt=outfmt,
                    debug=debug, profile=profile,
                    nowrite=nowrite)
        else:
            print(__doc__)
    else:
        lenfiles = len(files)
        if outpath or args['--slow'] or lenfiles == 1:
            if lenfiles == 1:
                files, = files
            convert(files, outpath=outpath,
                    infmt=infmt, outfmt=outfmt,
                    debug=debug, profile=profile,
                    nowrite=nowrite)
        else:
            from joblib import Parallel, delayed
            nj = 9
            if lenfiles < nj:
                nj = lenfiles
            Parallel(n_jobs=nj, verbose=10)(
                delayed(convert)(file,
                                 infmt=infmt, outfmt=outfmt,
                                 debug=debug, profile=profile,
                                 nowrite=nowrite)
                for file in files)


if __name__ == '__main__':
    main()