@@ -267,7 +267,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromInput(
     auto buffers = input_buffers[buffer_index];
     for (size_t i = 0; i < buffers.size(); i++) {
       auto &buffer = buffers[i];
-      auto &offset = input_offsets[i];
+      auto &offset = input_offsets[buffer_index];
 
       request->inputs_.push_back(std::move(
           InferenceRequestInputBuilder::fromInput(input, buffer, offset)));
@@ -280,7 +280,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromInput(
     if (batch_offset == batch_size) {
       batch_offset = 0;
       buffer_index++;
-      std::fill(input_offsets.begin(), input_offsets.end(), 0);
+      // std::fill(input_offsets.begin(), input_offsets.end(), 0);
     }
   }
 
@@ -294,7 +294,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromInput(
     auto buffers = output_buffers[buffer_index];
     for (size_t i = 0; i < buffers.size(); i++) {
       auto &buffer = buffers[i];
-      auto &offset = output_offsets[i];
+      auto &offset = output_offsets[buffer_index];
 
       request->outputs_.emplace_back(output);
       request->outputs_.back().setData(
@@ -317,7 +317,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromInput(
     auto buffers = output_buffers[buffer_index];
     for (size_t j = 0; j < buffers.size(); j++) {
       auto &buffer = buffers[j];
-      const auto &offset = output_offsets[j];
+      const auto &offset = output_offsets[buffer_index];
 
       request->outputs_.emplace_back();
      request->outputs_.back().setData(
@@ -335,49 +335,6 @@ InferenceRequestPtr InferenceRequestBuilder::fromInput(
     }
   }
 
-  // try {
-  //   auto buffers = input_buffers[buffer_index];
-  //   for (size_t i = 0; i < buffers.size(); i++) {
-  //     auto &buffer = buffers[i];
-  //     auto &offset = input_offsets[i];
-
-  //     request->inputs_.push_back(std::move(InferenceRequestInputBuilder::fromInput(req,
-  //     buffer, offset))); offset += request->inputs_.back().getSize();
-  //   }
-  // } catch (const std::invalid_argument &e) {
-  //   throw;
-  // }
-
-  // try {
-  //   auto buffers = output_buffers[buffer_index];
-  //   for (size_t i = 0; i < buffers.size(); i++) {
-  //     auto &buffer = buffers[i];
-  //     const auto &offset = output_offsets[i];
-
-  //     request->outputs_.emplace_back();
-  //     request->outputs_.back().setData(static_cast<std::byte
-  //     *>(buffer->data()) +
-  //                                      offset);
-  //     // TODO(varunsh): output_offset is currently ignored! The size of the
-  //     // output needs to come from the worker but we have no such
-  //     information.
-  //   }
-  // } catch (const std::invalid_argument &e) {
-  //   throw;
-  // }
-
-  // batch_offset++;
-  // // FIXME(varunsh): this was intended to support multiple input tensors but
-  // it
-  // // creates a bug where the batch_offset gets reset to zero too early
-  // (void)batch_size;
-  // // if (batch_offset == batch_size) {
-  // //   batch_offset = 0;
-  // //   buffer_index++;
-  // //   std::fill(input_offsets.begin(), input_offsets.end(), 0);
-  // //   std::fill(output_offsets.begin(), output_offsets.end(), 0);
-  // // }
-
   return request;
 }
 
@@ -423,7 +380,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromJson(
     auto &buffers = input_buffers[buffer_index];
     for (size_t j = 0; j < buffers.size(); j++) {
       auto &buffer = buffers[j];
-      auto &offset = input_offsets[j];
+      auto &offset = input_offsets[buffer_index];
 
       auto input = InferenceRequestInputBuilder::fromJson(
           std::make_shared<Json::Value>(i), buffer, offset);
@@ -438,7 +395,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromJson(
     if (batch_offset == batch_size) {
       batch_offset = 0;
       buffer_index++;
-      std::fill(input_offsets.begin(), input_offsets.end(), 0);
+      // std::fill(input_offsets.begin(), input_offsets.end(), 0);
     }
   }
 
@@ -453,7 +410,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromJson(
     auto buffers = output_buffers[buffer_index];
     for (size_t j = 0; j < buffers.size(); j++) {
       auto &buffer = buffers[j];
-      auto &offset = output_offsets[j];
+      auto &offset = output_offsets[buffer_index];
 
       auto output = InferenceRequestOutputBuilder::fromJson(
           std::make_shared<Json::Value>(i));
@@ -472,7 +429,7 @@ InferenceRequestPtr InferenceRequestBuilder::fromJson(
     auto buffers = output_buffers[buffer_index];
     for (size_t j = 0; j < buffers.size(); j++) {
       auto &buffer = buffers[j];
-      const auto &offset = output_offsets[j];
+      const auto &offset = output_offsets[buffer_index];
 
       request->outputs_.emplace_back();
       request->outputs_.back().setData(
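
Taken together, the hunks above make one change in two places: the diff suggests that input_offsets and output_offsets hold one running offset per buffer set, so they must be indexed by buffer_index rather than by the inner buffer-loop counter (i/j), and the std::fill reset on advancing to the next set is dropped because each set now keeps its own offset. The program below is a minimal standalone sketch of that bookkeeping, not the project's actual code; the set count, batch size, and tensor size (kNumBufferSets, kBatchSize, kTensorBytes) are made-up values for illustration.

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  constexpr std::size_t kNumBufferSets = 2;  // hypothetical number of buffer sets
  constexpr std::size_t kBatchSize = 4;      // requests per buffer set
  constexpr std::size_t kTensorBytes = 64;   // bytes written per request

  // One running write offset per buffer set, indexed by buffer_index.
  std::vector<std::size_t> input_offsets(kNumBufferSets, 0);

  std::size_t buffer_index = 0;
  std::size_t batch_offset = 0;

  for (std::size_t req = 0; req < kNumBufferSets * kBatchSize; req++) {
    // After the fix: advance the offset that belongs to the current set.
    // The old code used input_offsets[i] (the inner buffer-loop counter),
    // which reads and bumps another set's slot once a set holds more than
    // one buffer.
    auto &offset = input_offsets[buffer_index];
    std::cout << "request " << req << " -> set " << buffer_index
              << ", byte offset " << offset << '\n';
    offset += kTensorBytes;

    if (++batch_offset == kBatchSize) {
      batch_offset = 0;
      buffer_index++;
      // No std::fill reset here: each set owns its running offset, so
      // moving on to the next set must not zero the others.
    }
  }
  return 0;
}

Running the sketch prints offsets 0, 64, 128, 192 for each set in turn, i.e. the contiguous per-set layout the fix restores.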