Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion crates/openvino-tensor-converter/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,3 @@ pretty_env_logger = "0.4"
structopt = { version = "0.3", default-features = false }
# Note: the features below are very system-specific, see https://github.com/twistedfall/opencv-rust.
opencv = {version = "0.45", default-features = false, features = ["opencv-32", "buildtime-bindgen", "clang-runtime"]}

98 changes: 98 additions & 0 deletions crates/openvino/tests/classify-mobilenet.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,98 @@
//! Demonstrates using `openvino-rs` to classify an image using a MobileNet model and a prepared
//! input tensor. See [README](fixtures/mobilenet/README.md) for details on how this test fixture
//! was prepared.
mod fixtures;

use fixtures::mobilenet::Fixture;
use float_cmp::approx_eq;
use openvino::{Blob, Core, Layout, Precision, TensorDesc};
use std::fs;

#[test]
fn classify_mobilenet() {
    // Build an OpenVINO core and parse the fixture model's IR graph + weights.
    let mut core = Core::new(None).unwrap();
    let network = core
        .read_network_from_file(
            &Fixture::graph().to_string_lossy(),
            &Fixture::weights().to_string_lossy(),
        )
        .unwrap();

    // Sanity-check the tensor names this model is expected to expose.
    let input_name = &network.get_input_name(0).unwrap();
    assert_eq!(input_name, "input");
    let output_name = &network.get_output_name(0).unwrap();
    assert_eq!(output_name, "MobilenetV2/Predictions/Reshape_1");

    // Compile the network for CPU execution and set up an inference request.
    let mut executable_network = core.load_network(&network, "CPU").unwrap();
    let mut infer_request = executable_network.create_infer_request().unwrap();

    // Wrap the pre-converted image bytes in a blob of the shape the model expects.
    let tensor_data = fs::read(Fixture::tensor()).unwrap();
    let tensor_desc = TensorDesc::new(Layout::NHWC, &[1, 3, 224, 224], Precision::FP32);
    let blob = Blob::new(tensor_desc, &tensor_data).unwrap();

    // Run a single inference pass and copy out the raw f32 scores.
    // NOTE(review): the unsafe reinterpretation of blob bytes as f32 assumes the
    // output precision is FP32 — this matches the model conversion in build.sh.
    infer_request.set_blob(input_name, blob).unwrap();
    infer_request.infer().unwrap();
    let mut results = infer_request.get_blob(output_name).unwrap();
    let buffer = unsafe { results.buffer_mut_as_type::<f32>().unwrap().to_vec() };

    // Pair each probability with its class index, then order by descending
    // probability. It is unclear why the MobileNet output indices are "off by
    // one" but the `.skip(1)` below seems necessary to get results that make
    // sense (e.g. 763 = "revolver" vs 762 = "restaurant").
    let mut results: Results = buffer
        .iter()
        .copied()
        .skip(1)
        .enumerate()
        .map(|(class, probability)| Result(class, probability))
        .collect();
    results.sort_by(|left, right| right.1.partial_cmp(&left.1).unwrap());

    // Compare the top five classifications using approximate FP comparisons;
    // annotated with classification tags from
    // https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a.
    results[0].assert_approx_eq(&Result(963, 0.7134405)); // pizza
    results[1].assert_approx_eq(&Result(762, 0.0715866)); // restaurant
    results[2].assert_approx_eq(&Result(909, 0.0360171)); // wok
    results[3].assert_approx_eq(&Result(926, 0.0160412)); // hot pot
    results[4].assert_approx_eq(&Result(567, 0.0152781)); // frying pan

    // These results almost match (see the "off by one" comment above) the output
    // of running OpenVINO's `hello_classification` with the same inputs:
    // $ bin/intel64/Debug/hello_classification /tmp/mobilenet.xml /tmp/val2017/000000062808.jpg CPU
    // Image /tmp/val2017/000000062808.jpg
    // classid probability
    // ------- -----------
    // 964 0.7134405
    // 763 0.0715866
    // 910 0.0360171
    // 927 0.0160412
    // 568 0.0152781
    // 924 0.0148565
    // 500 0.0093886
    // 468 0.0073142
    // 965 0.0058377
    // 545 0.0043731
}

/// A structure for holding the `(category, probability)` pair extracted from the output tensor of
/// the OpenVINO classification.
#[derive(Debug, PartialEq)]
struct Result(usize, f32); // .0 = class index, .1 = probability score for that class
type Results = Vec<Result>; // the full list of classifications for one inference pass

impl Result {
fn assert_approx_eq(&self, expected: &Result) {
assert_eq!(
self.0, expected.0,
"Expected class ID {} but found {}",
expected.0, self.0
);
let approx_matches = approx_eq!(f32, self.1, expected.1, ulps = 2, epsilon = 0.01);
assert!(
approx_matches,
"Expected probability {} but found {} (outside of tolerance)",
expected.1, self.1
);
}
}
4 changes: 2 additions & 2 deletions crates/openvino/tests/fixtures/alexnet/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ unzip -Z1 val2017.zip | head -n 10 | xargs unzip val2017.zip
popd

# Convert an image to raw tensor format
cargo run -p openvino-tensor-converter $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x227x227-f32.bgr 227x227x3xfp32
cargo run -p openvino-tensor-converter -- $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x227x227-f32.bgr 227x227x3xfp32

# Clean up.
#rm -rf $TMP_DIR
rm -rf $TMP_DIR
4 changes: 2 additions & 2 deletions crates/openvino/tests/fixtures/inception/build.sh
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ unzip -Z1 val2017.zip | head -n 10 | xargs unzip val2017.zip
popd

# Convert an image to raw tensor format
cargo run -p openvino-tensor-converter $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x640x481-u8.bgr 481x640x3xu8
cargo run -p openvino-tensor-converter -- $TMP_DIR/val2017/000000062808.jpg $FIXTURE_DIR/tensor-1x3x640x481-u8.bgr 481x640x3xu8

# Clean up.
#rm -rf $TMP_DIR
rm -rf $TMP_DIR
12 changes: 12 additions & 0 deletions crates/openvino/tests/fixtures/mobilenet/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# MobileNet Test Fixture

In order to test the use of `openvino-rs` in the real world, here we include the necessary files for
performing the classification in [classify-mobilenet.rs](../classify-mobilenet.rs). The artifacts
are included in-tree and can be rebuilt using the [build.sh] script (with the right system
dependencies). The artifacts include:
- the MobileNet inference model, converted to OpenVINO IR format (`*.bin`, `*.mapping`, `*.xml`)
using [this
guide](https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/mobilenet-v2-1.0-224/model.yml)
- an image from the COCO dataset transformed into the correct tensor format (`tensor-*.bgr`)

The [mod.rs] `Fixture` module provides the correct artifact paths in Rust.
45 changes: 45 additions & 0 deletions crates/openvino/tests/fixtures/mobilenet/build.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
#!/bin/bash

# The following script rebuilds the test fixture committed in this directory. It relies on external tools that not all
# systems will have readily available and as such it should be used mainly as an example script. The artifacts created
# are checked in to save future users the setup time.
set -e
# Work in a caller-supplied directory or a fresh temporary one.
TMP_DIR=${1:-$(mktemp -d -t ci-XXXXXXXXXX)}
FIXTURE_DIR=$(dirname "$0" | xargs realpath)
OPENVINO_DIR=${OPENVINO_DIR:-$(realpath "$FIXTURE_DIR/../../../../../../openvino")}
PYTHON=${PYTHON:-python3}
# Quote all path expansions so directories containing spaces do not break the script.
pushd "$TMP_DIR"

# Retrieve the MobileNet model, following
# https://github.com/openvinotoolkit/open_model_zoo/blob/master/models/public/mobilenet-v2-1.0-224/model.yml.
wget --no-clobber https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz
tar zxvf mobilenet_v2_1.0_224.tgz

# Convert the model to OpenVINO IR using the model-optimizer.
# The shape/mean/scale arguments are quoted: `[1,224,224,3]` is otherwise a
# shell glob character class and could expand if a matching file exists.
pip install --user -r "$OPENVINO_DIR/model-optimizer/requirements_tf.txt"
"$PYTHON" "$OPENVINO_DIR/model-optimizer/mo_tf.py" \
--reverse_input_channels \
--input_shape='[1,224,224,3]' \
--input=input \
--mean_values='input[127.5,127.5,127.5]' \
--scale_values='input[127.5]' \
--output=MobilenetV2/Predictions/Reshape_1 \
--input_model="$TMP_DIR/mobilenet_v2_1.0_224_frozen.pb"

cp "$TMP_DIR/mobilenet_v2_1.0_224_frozen.bin" "$FIXTURE_DIR/mobilenet.bin"
cp "$TMP_DIR/mobilenet_v2_1.0_224_frozen.mapping" "$FIXTURE_DIR/mobilenet.mapping"
cp "$TMP_DIR/mobilenet_v2_1.0_224_frozen.xml" "$FIXTURE_DIR/mobilenet.xml"

# Retrieve the first 10 images of the COCO dataset.
wget --no-clobber http://images.cocodataset.org/zips/val2017.zip
rm -rf val2017
unzip -Z1 val2017.zip | head -n 10 | xargs unzip val2017.zip
popd

# Convert an image to raw tensor format. Weirdly, this actually produces the correct output tensor:
# I would have expected to have to transpose tensor dimensions for MobileNet (i.e. 224x224x3 to
# 3x224x224) but this works. Perhaps the model-optimizer sets things to an OpenVINO-expected shape.
cargo run -p openvino-tensor-converter -- "$TMP_DIR/val2017/000000062808.jpg" "$FIXTURE_DIR/tensor-1x224x224x3-f32.bgr" 224x224x3xfp32

# Clean up.
rm -rf "$TMP_DIR"
3 changes: 3 additions & 0 deletions crates/openvino/tests/fixtures/mobilenet/mobilenet.bin
Git LFS file not shown
Loading