2 changes: 1 addition & 1 deletion .github/workflows/main.yml
@@ -22,7 +22,7 @@ jobs:
 
   build:
     name: Build from OpenVINO source
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-18.04
     steps:
       - uses: actions/checkout@v2
         with:
58 changes: 23 additions & 35 deletions crates/openvino/tests/classify-inception.rs
@@ -9,38 +9,25 @@ use std::fs;
 #[test]
 fn classify_inception() {
     let mut core = Core::new(None).unwrap();
-    let mut network = core
+    let network = core
         .read_network_from_file(
             &Fixture::graph().to_string_lossy(),
             &Fixture::weights().to_string_lossy(),
         )
         .unwrap();
 
     let input_name = &network.get_input_name(0).unwrap();
-    assert_eq!(input_name, "image_tensor");
+    assert_eq!(input_name, "input");
     let output_name = &network.get_output_name(0).unwrap();
-    assert_eq!(output_name, "DetectionOutput");
-
-    // Prepare inputs and outputs for resizing, since our input tensor is not the size the model expects.
-    network
-        .set_input_resize_algorithm(input_name, ResizeAlgorithm::RESIZE_BILINEAR)
-        .unwrap();
-    network.set_input_layout(input_name, Layout::NHWC).unwrap();
-    network
-        .set_input_precision(input_name, Precision::U8)
-        .unwrap();
-    network
-        .set_output_precision(output_name, Precision::FP32)
-        .unwrap();
+    assert_eq!(output_name, "InceptionV3/Predictions/Softmax");
 
     // Load the network.
     let mut executable_network = core.load_network(&network, "CPU").unwrap();
     let mut infer_request = executable_network.create_infer_request().unwrap();
+    // TODO eventually, this should not panic: infer_request.set_batch_size(1).unwrap();
 
     // Read the image.
     let tensor_data = fs::read(Fixture::tensor()).unwrap();
-    let tensor_desc = TensorDesc::new(Layout::NHWC, &[1, 3, 481, 640], Precision::U8);
+    let tensor_desc = TensorDesc::new(Layout::NHWC, &[1, 3, 299, 299], Precision::FP32);
     let blob = Blob::new(tensor_desc, &tensor_data).unwrap();
 
     // Execute inference.
@@ -60,30 +47,31 @@ fn classify_inception() {
     assert_eq!(
         &results[..5],
         &[
-            Result(15, 59.0),
-            Result(1, 1.0),
-            Result(8, 1.0),
-            Result(12, 1.0),
-            Result(16, 0.9939936),
+            Result(964, 0.9648312),
+            Result(763, 0.0015633557),
+            Result(412, 0.0007776478),
+            Result(814, 0.0006391522),
+            Result(924, 0.0006150733),
         ][..]
     )
 
-    // This above results match the output of running OpenVINO's `hello_classification` with the same inputs:
-    // $ bin/intel64/Debug/hello_classification /tmp/fixture/frozen_inference_graph.xml /tmp/fixture/000000062808.jpg CPU
+    // The results above almost match the output of OpenVINO's `hello_classification` with similar
+    // inputs:
+    // $ bin/intel64/Debug/hello_classification ../inception.xml ../pizza.jpg CPU
     // Top 10 results:
-    // Image /tmp/fixture/000000062808.jpg
+    // Image ../pizza.jpg
     // classid probability
     // ------- -----------
-    // 15      59.0000000
-    // 1       1.0000000
-    // 12      1.0000000
-    // 8       1.0000000
-    // 16      0.9939936
-    // 2       0.9750488
-    // 9       0.9535966
-    // 20      0.8796915
-    // 13      0.8178773
-    // 6       0.8092338
+    // 964     0.9656160
+    // 763     0.0015505
+    // 412     0.0007806
+    // 924     0.0006135
+    // 814     0.0006102
+    // 966     0.0005903
+    // 960     0.0004972
+    // 522     0.0003951
+    // 927     0.0003644
+    // 923     0.0002908
 }

 /// A structure for holding the `(category, probability)` pair extracted from the output tensor of
110 changes: 110 additions & 0 deletions crates/openvino/tests/detect-inception.rs
@@ -0,0 +1,110 @@
+//! Demonstrates using `openvino-rs` to detect objects in an image using an Inception SSD model and a prepared input tensor. See
+//! [README](fixtures/inception/README.md) for details on how this test fixture was prepared.
+mod fixtures;
+
+use fixtures::inception_ssd::Fixture;
+use openvino::{Blob, Core, Layout, Precision, ResizeAlgorithm, TensorDesc};
+use std::fs;
+
+#[test]
+fn detect_inception() {
+    let mut core = Core::new(None).unwrap();
+    let mut network = core
+        .read_network_from_file(
+            &Fixture::graph().to_string_lossy(),
+            &Fixture::weights().to_string_lossy(),
+        )
+        .unwrap();
+
+    let input_name = &network.get_input_name(0).unwrap();
+    assert_eq!(input_name, "image_tensor");
+    let output_name = &network.get_output_name(0).unwrap();
+    assert_eq!(output_name, "DetectionOutput");
+
+    // Prepare inputs and outputs for resizing, since our input tensor is not the size the model expects.
+    network
+        .set_input_resize_algorithm(input_name, ResizeAlgorithm::RESIZE_BILINEAR)
+        .unwrap();
+    network.set_input_layout(input_name, Layout::NHWC).unwrap();
+    network
+        .set_input_precision(input_name, Precision::U8)
+        .unwrap();
+    network
+        .set_output_precision(output_name, Precision::FP32)
+        .unwrap();
+
+    // Load the network.
+    let mut executable_network = core.load_network(&network, "CPU").unwrap();
+    let mut infer_request = executable_network.create_infer_request().unwrap();
+
+    // Read the image.
+    let tensor_data = fs::read(Fixture::tensor()).unwrap();
+    let tensor_desc = TensorDesc::new(Layout::NHWC, &[1, 3, 481, 640], Precision::U8);
+    let blob = Blob::new(tensor_desc, &tensor_data).unwrap();
+
+    // Execute inference.
+    infer_request.set_blob(input_name, blob).unwrap();
+    infer_request.infer().unwrap();
+    let mut results = infer_request.get_blob(output_name).unwrap();
+    let buffer = unsafe { results.buffer_mut_as_type::<f32>().unwrap().to_vec() };
+
+    // Sort results (TODO extract bounding boxes instead).
+    let mut results: Results = buffer
+        .iter()
+        .enumerate()
+        .map(|(c, p)| Result(c, *p))
+        .collect();
+    results.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
+
+    assert_eq!(
+        &results[..5],
+        &[
+            Result(15, 59.0),
+            Result(1, 1.0),
+            Result(8, 1.0),
+            Result(12, 1.0),
+            Result(16, 0.9939936),
+        ][..]
+    )
+
+    // The results above should match the output of running OpenVINO's
+    // `object_detection_sample_ssd` with the same inputs. This test incorrectly uses result
+    // sorting instead of extracting the bounding boxes like `object_detection_sample_ssd` does
+    // (FIXME):
+    // $ bin/intel64/Debug/object_detection_sample_ssd -m ../inception-ssd.xml -i ../pizza.jpg
+    // [ INFO ] InferenceEngine:
+    //     API version ............ 2.1
+    //     Build .................. custom_master_a1d858c5028c1a26d37286913d64028849454b75
+    //     Description ....... API
+    // Parsing input parameters
+    // [ INFO ] Files were added: 1
+    // [ INFO ]     ../pizza.jpg
+    // [ INFO ] Loading Inference Engine
+    // [ INFO ] Device info:
+    //     CPU
+    //     MKLDNNPlugin version ......... 2.1
+    //     Build ........... custom_master_a1d858c5028c1a26d37286913d64028849454b75
+    // [ INFO ] Loading network files:
+    //     ../inception-ssd.xml
+    // [ INFO ] Preparing input blobs
+    // [ INFO ] Batch size is 1
+    // [ INFO ] Preparing output blobs
+    // [ INFO ] Loading model to the device
+    // [ INFO ] Create infer request
+    // [ WARNING ] Image is resized from (640, 481) to (300, 300)
+    // [ INFO ] Batch size is 1
+    // [ INFO ] Start inference
+    // [ INFO ] Processing output blobs
+    // [0,1] element, prob = 0.975312    (1,19)-(270,389) batch id : 0 WILL BE PRINTED!
+    // [1,1] element, prob = 0.953244    (368,17)-(640,393) batch id : 0 WILL BE PRINTED!
+    // [2,59] element, prob = 0.993812    (143,280)-(502,423) batch id : 0 WILL BE PRINTED!
+    // [3,67] element, prob = 0.301402    (5,369)-(582,480) batch id : 0
+    // [ INFO ] Image out_0.bmp created!
+    // [ INFO ] Execution successful
+}
+
+/// A structure for holding the `(category, probability)` pair extracted from the output tensor of
+/// the OpenVINO classification.
+#[derive(Debug, PartialEq)]
+struct Result(usize, f32);
+type Results = Vec<Result>;
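
For context on the FIXME above: rather than sorting raw output values, an SSD `DetectionOutput` blob is conventionally parsed as rows of seven `f32` values, `[image_id, label, confidence, x_min, y_min, x_max, y_max]`, with coordinates normalized to `[0, 1]` and an `image_id` of `-1` terminating the list. Below is a minimal sketch of that extraction; the `Detection` type and the confidence threshold are illustrative, not part of `openvino-rs`.

```rust
/// A hypothetical container for one SSD detection; not part of `openvino-rs`.
#[derive(Debug)]
struct Detection {
    label: usize,
    confidence: f32,
    /// (x_min, y_min, x_max, y_max), normalized to [0, 1].
    bbox: (f32, f32, f32, f32),
}

/// Parse an SSD `DetectionOutput` buffer, assuming the conventional layout of
/// seven `f32` values per detection.
fn extract_detections(buffer: &[f32], threshold: f32) -> Vec<Detection> {
    buffer
        .chunks_exact(7)
        // An image id of -1 marks the end of the valid detections.
        .take_while(|d| d[0] >= 0.0)
        .filter(|d| d[2] >= threshold)
        .map(|d| Detection {
            label: d[1] as usize,
            confidence: d[2],
            bbox: (d[3], d[4], d[5], d[6]),
        })
        .collect()
}
```

Scaling the normalized coordinates by the original image size (640×481 here) would reproduce the pixel boxes printed by `object_detection_sample_ssd` in the comment above.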
12 changes: 7 additions & 5 deletions crates/openvino/tests/fixtures/inception/README.md
@@ -1,9 +1,11 @@
 # Inception SSD Test Fixture
 
-In order to test the use of `openvino-rs` in the real world, here we include the necessary files for performing the
-classification in [classify-inception.rs](../classify-inception.rs). The artifacts are included in-tree and can be
-rebuilt using the [build.sh] script (with the right system dependencies). The artifacts include:
-- the Inception SSD inference model, converted to OpenVINO IR format (`*.bin`, `*.mapping`, `*.xml`)
+In order to test the use of `openvino-rs` in the real world, here we include the necessary files for
+performing the classification in [classify-inception.rs](../classify-inception.rs). The artifacts
+are included in-tree and can be rebuilt using the [build.sh] script (with the right system
+dependencies). The artifacts include:
+- the Inception v3 classification model, converted to OpenVINO IR format (`*.bin`, `*.mapping`,
+  `*.xml`)
 - an image from the COCO dataset transformed into the correct tensor format (`tensor-*.bgr`)
 
-The [mod.sh] `Fixture` provides the correct artifact paths in Rust.
+The [mod.sh] `Fixture` provides the correct artifact paths in Rust.
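
For reference, the tests rely on this `Fixture` helper for artifact paths. A minimal sketch of what the fixtures module presumably provides, assuming it simply joins the fixture directory with the artifact names produced by `build.sh` (the actual module in the repository may differ):

```rust
// A minimal sketch, assuming the artifact names produced by build.sh;
// the real fixtures module may construct paths differently.
use std::path::PathBuf;

pub struct Fixture;

impl Fixture {
    fn dir() -> PathBuf {
        PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/inception")
    }
    pub fn graph() -> PathBuf {
        Self::dir().join("inception.xml")
    }
    pub fn weights() -> PathBuf {
        Self::dir().join("inception.bin")
    }
    pub fn tensor() -> PathBuf {
        Self::dir().join("tensor-1x3x299x299-fp32.bgr")
    }
}
```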
28 changes: 16 additions & 12 deletions crates/openvino/tests/fixtures/inception/build.sh
@@ -10,20 +10,24 @@ OPENVINO_DIR=${OPENVINO_DIR:-$(realpath $FIXTURE_DIR/../../../../../../openvino)}
 PYTHON=${PYTHON:-python3}
 pushd $TMP_DIR
 
-# Retrieve the MobileNet model from the TensorFlow
-wget --no-clobber http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz
-tar xzvf ssd_*.tar.gz
-ln -sf ssd_inception_v2_coco_2018_01_28 model
+# Retrieve the Inception model from TensorFlow.
+wget --no-clobber https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz
+tar xzvf inception_*.tar.gz
 
-# Convert the model to OpenVINO IR using the model-optimizer.
+# Convert the model to OpenVINO IR using the model-optimizer; see
+# https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/googlenet-v3/model.yml.
 pip install --user -r $OPENVINO_DIR/model-optimizer/requirements_tf.txt
 $PYTHON $OPENVINO_DIR/model-optimizer/mo_tf.py \
-    --input_model model/frozen_inference_graph.pb \
-    --transformations_config $OPENVINO_DIR/model-optimizer/extensions/front/tf/ssd_v2_support.json \
-    --tensorflow_object_detection_api_pipeline_config model/pipeline.config
-cp $TMP_DIR/frozen_inference_graph.bin $FIXTURE_DIR/inception-ssd.bin
-cp $TMP_DIR/frozen_inference_graph.mapping $FIXTURE_DIR/inception-ssd.mapping
-cp $TMP_DIR/frozen_inference_graph.xml $FIXTURE_DIR/inception-ssd.xml
+    --reverse_input_channels \
+    --input_shape=[1,299,299,3] \
+    --input=input \
+    --mean_values=input[127.5,127.5,127.5] \
+    --scale_values=input[127.5] \
+    --output=InceptionV3/Predictions/Softmax \
+    --input_model=inception_v3_2016_08_28_frozen.pb
+cp $TMP_DIR/inception_v3_2016_08_28_frozen.bin $FIXTURE_DIR/inception.bin
+cp $TMP_DIR/inception_v3_2016_08_28_frozen.mapping $FIXTURE_DIR/inception.mapping
+cp $TMP_DIR/inception_v3_2016_08_28_frozen.xml $FIXTURE_DIR/inception.xml
 
 # Retrieve the first 10 images of the COCO dataset.
 wget --no-clobber http://images.cocodataset.org/zips/val2017.zip
@@ -32,7 +36,7 @@ unzip -Z1 val2017.zip | head -n 10 | xargs unzip val2017.zip
 popd
 
 # Convert an image to raw tensor format
-cargo run -p openvino-tensor-converter -- $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x640x481-u8.bgr 481x640x3xu8
+cargo run -p openvino-tensor-converter -- $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x299x299-fp32.bgr 299x299x3xfp32
 
 # Clean up.
 rm -rf $TMP_DIR
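
The final `cargo run` step encodes the image as a raw 299×299 BGR tensor of `f32` values. A rough sketch of that conversion under stated assumptions: the `image` crate for decoding and resizing, bilinear filtering, interleaved (NHWC) BGR output in little-endian `f32`. The actual `openvino-tensor-converter` implementation may differ in filter choice and channel handling.

```rust
use image::imageops::FilterType;

/// Convert an image file to a raw, interleaved BGR tensor of f32 values
/// (a sketch; the real openvino-tensor-converter may differ).
fn convert(src: &str, dst: &str) -> Result<(), Box<dyn std::error::Error>> {
    let resized = image::open(src)?.resize_exact(299, 299, FilterType::Triangle);
    let rgb = resized.to_rgb8();
    let mut bytes = Vec::with_capacity(299 * 299 * 3 * 4);
    for pixel in rgb.pixels() {
        // Reorder RGB to BGR and widen each channel to a little-endian f32.
        for &channel in &[pixel[2], pixel[1], pixel[0]] {
            bytes.extend_from_slice(&f32::from(channel).to_le_bytes());
        }
    }
    std::fs::write(dst, bytes)?;
    Ok(())
}
```

Note that the mean and scale values are folded into the IR by the `--mean_values`/`--scale_values` flags above, so the tensor keeps raw 0–255 channel values rather than normalized ones.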
3 changes: 3 additions & 0 deletions crates/openvino/tests/fixtures/inception/inception.bin
Git LFS file not shown