// Extraction artifact removed: this span contained a duplicated file header and a
// column of display line numbers (1-308) left over from a web copy/paste. The
// actual source file begins with the license header below.
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
using Microsoft.ML.OnnxRuntime.Tensors;
using System;
using System.Buffers;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
namespace Microsoft.ML.OnnxRuntime
{
/// <summary>
/// Holds the keys and the values of a managed dictionary as a pair of
/// parallel 1-D dense tensors. This representation makes the dictionary
/// contents available to the native code while avoiding a data copy.
/// Strings require special handling.
/// </summary>
internal class MapHelper
{
    /// <summary>Tensor of dictionary keys; concretely a DenseTensor&lt;K&gt;.</summary>
    internal TensorBase Keys { get; }

    /// <summary>Tensor of dictionary values; concretely a DenseTensor&lt;V&gt;.</summary>
    internal TensorBase Values { get; }

    /// <summary>
    /// Wraps the given key/value tensors. Both are expected to be 1-D and of equal length
    /// (they are built from the same dictionary) — not validated here.
    /// </summary>
    internal MapHelper(TensorBase keys, TensorBase values)
    {
        Keys = keys;
        Values = values;
    }
}
/// <summary>
/// This is a legacy class that is kept for backward compatibility.
/// Use the OrtValue based API instead.
///
/// The class associates a name with an Object.
/// The name of the class is a misnomer: it does not hold any Onnx values,
/// just managed representations of them.
///
/// The class is currently used as both inputs and outputs. Because it is non-
/// disposable, it can not hold on to any native objects.
///
/// When used as input, we temporarily create OrtValues that map managed inputs
/// directly. Thus we are able to avoid copying of contiguous data.
///
/// For outputs, tensor buffers work the same as input, provided the buffer matches
/// the expected output shape. For other types (maps and sequences) we create a copy of the data.
/// This is because the class is not Disposable and it is a public interface, thus it can not own
/// the underlying OrtValues that must be destroyed before Run() returns.
///
/// To avoid data copying on output, use the DisposableNamedOnnxValue class that is returned from Run() methods.
/// This provides access to the native memory tensors and avoids copying.
///
/// It is a recursive structure that may contain Tensors (base case),
/// other sequences, and maps. Although the OnnxValueType is exposed,
/// the caller is supposed to know the actual data type contained.
///
/// The convention is that for tensors, it would contain a DenseTensor&lt;T&gt; instance or
/// anything derived from Tensor&lt;T&gt;.
///
/// For sequences, it would contain an IList&lt;T&gt; where T is an instance of NamedOnnxValue that
/// would contain a tensor or another type.
///
/// For maps, it would contain an IDictionary&lt;K, V&gt; where K, V are primitive types or strings.
/// </summary>
public class NamedOnnxValue
{
/// <summary>
/// Managed Tensor, Dictionary or IList
/// </summary>
private Object _value;
/// <summary>
/// Name of the instance, model input/output
/// </summary>
private string _name;
// Parallel key/value tensor pair when ValueType is ONNX_TYPE_MAP; null for all other value types.
private MapHelper _mapHelper; // used for maps, otherwise null
/// <summary>
/// Constructs an instance of NamedOnnxValue and represents
/// a model input to an inference session. ValueType is left as
/// ONNX_TYPE_UNKNOWN, which is why this overload is obsolete.
/// </summary>
/// <param name="name">input/output name</param>
/// <param name="value">Object that may be a tensor, Dictionary, IList</param>
[Obsolete("Use constructors with valueType or static factory methods")]
protected NamedOnnxValue(string name, Object value)
{
_name = name;
_value = value;
ValueType = OnnxValueType.ONNX_TYPE_UNKNOWN;
}
/// <summary>
/// Constructs an instance that contains a tensor, sequence or optional type.
/// Maps are rejected here because they additionally need a MapHelper —
/// use the MapHelper constructor overload for those.
/// </summary>
/// <param name="name">input/output name</param>
/// <param name="value">managed object: Tensor or IList per the class convention</param>
/// <param name="valueType">the Onnx value type of <paramref name="value"/>; must not be ONNX_TYPE_MAP</param>
internal NamedOnnxValue(string name, Object value, OnnxValueType valueType)
{
_name = name;
_value = value;
ValueType = valueType;
if (valueType == OnnxValueType.ONNX_TYPE_MAP)
{
throw new OnnxRuntimeException(ErrorCode.InvalidArgument, "Use another __ctor for maps");
}
}
/// <summary>
/// Use this to construct maps. ValueType is forced to ONNX_TYPE_MAP.
/// </summary>
/// <param name="name">input/output name</param>
/// <param name="value">managed representation of the map contents</param>
/// <param name="helper">pre-built key/value tensors for the native projection</param>
internal NamedOnnxValue(string name, Object value, MapHelper helper)
{
_name = name;
_value = value;
ValueType = OnnxValueType.ONNX_TYPE_MAP;
_mapHelper = helper;
}
/// <summary>
/// Onnx Value Type if known. In general, NamedOnnxValue is able to contain
/// arbitrary objects. Please, follow the convention described in the class doc.
/// </summary>
public OnnxValueType ValueType { get; internal set; }
/// <summary>
/// This is a factory method that instantiates NamedOnnxValue
/// and associates the name with an instance of a Tensor&lt;T&gt;.
/// </summary>
/// <typeparam name="T">tensor element type</typeparam>
/// <param name="name">name</param>
/// <param name="value">Tensor&lt;T&gt;</param>
/// <returns>new NamedOnnxValue with ValueType == ONNX_TYPE_TENSOR</returns>
public static NamedOnnxValue CreateFromTensor<T>(string name, Tensor<T> value)
{
return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_TENSOR);
}
/// <summary>
/// This is a factory method that instantiates NamedOnnxValue.
/// It would contain a sequence of elements.
/// </summary>
/// <typeparam name="T">sequence element type, usually NamedOnnxValue (see class doc)</typeparam>
/// <param name="name">name</param>
/// <param name="value">the sequence; stored as-is, not copied or materialized here</param>
/// <returns>new NamedOnnxValue with ValueType == ONNX_TYPE_SEQUENCE</returns>
public static NamedOnnxValue CreateFromSequence<T>(string name, IEnumerable<T> value)
{
return new NamedOnnxValue(name, value, OnnxValueType.ONNX_TYPE_SEQUENCE);
}
/// <summary>
/// Instantiates NamedOnnxValue that contains an IDictionary&lt;K, V&gt;.
/// </summary>
/// <typeparam name="K">Keys type</typeparam>
/// <typeparam name="V">Values type</typeparam>
/// <param name="name">name</param>
/// <param name="value">the dictionary; its keys and values are copied into 1-D tensors</param>
/// <returns>new instance of NamedOnnxValue</returns>
public static NamedOnnxValue CreateFromMap<K, V>(string name, IDictionary<K, V> value)
{
// The order in which Keys and Values are enumerated is unspecified,
// but it is guaranteed to be the same order for both collections.
// These tensors are 1-D
return CreateFromMap<K, V>(name, value.Keys, value.Values);
}
// Copies keys and values into two 1-D DenseTensors of length keys.Count / values.Count.
// NOTE(review): _value is set to the values collection, not the original dictionary,
// so AsDictionary() on the result returns null — long-standing behavior; confirm before changing.
internal static NamedOnnxValue CreateFromMap<K, V>(string name, ICollection<K> keys, ICollection<V> values)
{
var keysTensor = new DenseTensor<K>(keys.ToArray(), new int[1] { keys.Count });
var valuesTensor = new DenseTensor<V>(values.ToArray(), new int[1] { values.Count });
return new NamedOnnxValue(name, values, new MapHelper(keysTensor, valuesTensor));
}
/// <summary>
/// Exposes the name of the model input/output.
/// </summary>
/// <value>name string</value>
public string Name { get { return _name; } set { _name = value; } }
/// <summary>
/// Exposes the underlying managed object.
/// </summary>
/// <value>object</value>
public Object Value { get { return _value; } set { _value = value; } }
/// <summary>
/// Try-get value as a Tensor&lt;T&gt;.
/// </summary>
/// <typeparam name="T">tensor element type</typeparam>
/// <returns>Tensor object if the contained value is a Tensor. Null otherwise</returns>
public Tensor<T> AsTensor<T>()
{
return _value as Tensor<T>; // will return null if not castable
}
/// <summary>
/// Try-get value as an IEnumerable&lt;T&gt;.
/// T is usually a NamedOnnxValue instance that may contain
/// Tensors, Sequences, Maps or optional types.
/// </summary>
/// <typeparam name="T">element type</typeparam>
/// <returns>Enumerable object if the contained value is an IEnumerable&lt;T&gt;. Null otherwise</returns>
public IEnumerable<T> AsEnumerable<T>()
{
var x = _value as IEnumerable<T>;
return x;
}
/// <summary>
/// Try-get value as an IDictionary&lt;K, V&gt;.
/// </summary>
/// <typeparam name="K">Key type, currently primitive type only</typeparam>
/// <typeparam name="V">Value type, currently primitive type only</typeparam>
/// <returns>Dictionary object if the contained value is an IDictionary&lt;K, V&gt;. Null otherwise</returns>
public IDictionary<K, V> AsDictionary<K, V>()
{
return _value as IDictionary<K, V>;
}
/// <summary>
/// Pin the underlying memory and create an instance of OrtValue containing a tensor
/// based on the pinned managed memory. The caller is responsible for disposing
/// the returned memoryOwner, which keeps both the pinned memory and the native
/// value alive while the handle is in use.
/// </summary>
/// <param name="metadata">metadata of the model input this value is bound to</param>
/// <param name="memoryOwner">receives the projection owning the native value;
/// dispose it after the returned handle is no longer needed</param>
/// <returns>The native OrtValue handle</returns>
internal virtual IntPtr InputToOrtValueHandle(NodeMetadata metadata, out IDisposable memoryOwner)
{
var projection = ManagedTypeProjection.CreateProjection(this, metadata);
memoryOwner = projection;
return projection.Handle;
}
/// <summary>
/// Produces an output value for outputs. This produces an output value
/// only for tensors or optional types that can contain a tensor.
/// For all other Onnx value types, this method throws. Use Run() overloads
/// that return DisposableNamedOnnxValue to get access to all Onnx value types
/// that may be returned as output.
/// </summary>
/// <param name="metadata">expected metadata of the model output this value is bound to</param>
/// <param name="memoryOwner">receives the projection owning the native value;
/// dispose it after the returned handle is no longer needed</param>
/// <returns>The native OrtValue handle</returns>
internal virtual IntPtr OutputToOrtValueHandle(NodeMetadata metadata, out IDisposable memoryOwner)
{
// For NamedOnnxValue for output we only allow to produce OrtValue for tensors
// or optional type that may contain a tensor
if (metadata.OnnxValueType == OnnxValueType.ONNX_TYPE_TENSOR)
{
var projection = ManagedTypeProjection.CreateProjection(this, metadata);
memoryOwner = projection;
return projection.Handle;
}
if (metadata.OnnxValueType == OnnxValueType.ONNX_TYPE_OPTIONAL)
{
// Unwrap the optional's element metadata; only a tensor payload is projectable.
var meta = metadata.AsOptionalMetadata().ElementMeta;
if (meta.OnnxValueType == OnnxValueType.ONNX_TYPE_TENSOR)
{
var projection = ManagedTypeProjection.CreateProjection(this, metadata);
memoryOwner = projection;
return projection.Handle;
}
}
throw new OnnxRuntimeException(ErrorCode.NotImplemented,
$"Can not create output OrtValue for NamedOnnxValue '{metadata.OnnxValueType}' type." +
$" Only tensors can be pre-allocated for outputs " +
$" Use Run() overloads that return DisposableNamedOnnxValue to get access to all Onnx value types that may be returned as output.");
}
/// <summary>
/// This method is used internally to feed dictionary keys
/// to create an OrtValue for map keys.
/// </summary>
/// <returns>1-D tensor of keys (a DenseTensor&lt;K&gt; held as TensorBase)</returns>
/// <exception cref="OnnxRuntimeException">thrown when ValueType is not ONNX_TYPE_MAP</exception>
internal TensorBase GetDictionaryKeys()
{
if (ValueType != OnnxValueType.ONNX_TYPE_MAP)
{
throw new OnnxRuntimeException(ErrorCode.Fail, "This NamedOnnxValue instance does not contain a dictionary");
}
// The map constructor is the only path that sets ONNX_TYPE_MAP, and it always sets _mapHelper.
Debug.Assert(_mapHelper != null);
return _mapHelper.Keys;
}
/// <summary>
/// This method is used internally to feed dictionary values
/// to create an OrtValue for map values.
/// </summary>
/// <returns>1-D tensor of values (a DenseTensor&lt;V&gt; held as TensorBase)</returns>
/// <exception cref="OnnxRuntimeException">thrown when ValueType is not ONNX_TYPE_MAP</exception>
internal TensorBase GetDictionaryValues()
{
if (ValueType != OnnxValueType.ONNX_TYPE_MAP)
{
throw new OnnxRuntimeException(ErrorCode.Fail, "This NamedOnnxValue instance does not contain a dictionary");
}
// The map constructor is the only path that sets ONNX_TYPE_MAP, and it always sets _mapHelper.
Debug.Assert(_mapHelper != null);
return _mapHelper.Values;
}
}
}