Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

LRN (Local response normalization) re-merge #152

Open
wants to merge 6 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ fn test_matmul_square_matrix() {
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#InstanceNormalization">InstanceNormalization</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#InstanceNormalization-6">6</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#InstanceNormalization-1">1</a>|
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#IsInf">IsInf</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#IsInf-10">10</a>|
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#IsNaN">IsNaN</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#IsNaN-13">13</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#IsNaN-9">9</a>|
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#LRN">LRN</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LRN-13">13</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LRN-1">1</a>||
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#LRN">LRN</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LRN-13">13</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LRN-1">1</a>|✅||
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#LSTM">LSTM</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LSTM-14">14</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LSTM-7">7</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LSTM-1">1</a>|
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#LeakyRelu">LeakyRelu</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LeakyRelu-6">6</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#LeakyRelu-1">1</a>|✅|✅|
|<a href="https://github.com/onnx/onnx/blob/main/docs/Operators.md#Less">Less</a>|<a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Less-13">13</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Less-9">9</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Less-7">7</a>, <a href="https://github.com/onnx/onnx/blob/main/docs/Changelog.md#Less-1">1</a>|✅|
Expand Down
37 changes: 37 additions & 0 deletions wonnx/src/compiler.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,6 +83,11 @@ lazy_static! {
include_str!("../templates/matrix/transpose.wgsl"),
)
.unwrap();
// Register the compute-shader template for the LRN operator so that
// `compile` can render "matrix/lrn.wgsl" below.
tera.add_raw_template(
"matrix/lrn.wgsl",
include_str!("../templates/matrix/lrn.wgsl"),
)
.unwrap();
tera.add_raw_template(
"pool/aggregate.wgsl",
include_str!("../templates/pool/aggregate.wgsl"),
Expand Down Expand Up @@ -1321,6 +1326,38 @@ pub fn compile(
threads: (ceil(output_lengths[0], 256) as _, 1, 1),
}
}
"LRN" => {
// Local Response Normalization:
//   Y = X / (bias + (alpha / size) * sum(X_window^2))^beta
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#lrn
// The defaults for alpha, beta and bias match the ONNX spec.
// NOTE(review): the ONNX spec marks `size` as *required*; defaulting it to 1
// silently accepts models that omit it — consider erroring instead.
let alpha = node.get_attribute_value("alpha", Some(0.0001))?;
let beta = node.get_attribute_value("beta", Some(0.75))?;
let bias = node.get_attribute_value("bias", Some(1.0))?;
let size = node.get_attribute_value("size", Some(1))?;

// Forward the raw attributes to the WGSL template.
context.insert("alpha", &alpha);
context.insert("beta", &beta);
context.insert("bias", &bias);
context.insert("size", &size);

// Split the window of `size` elements around the centre element:
// floor((size-1)/2) elements before it, ceil((size-1)/2) after it.
let left_size = f64::floor((size - 1) as f64 / 2.0) as u32;
let right_size = f64::ceil((size - 1) as f64 / 2.0) as u32;

context.insert("left_size", &left_size);
context.insert("right_size", &right_size);

// One thread per output element, split over multiple workgroups when the
// element count exceeds the per-dimension dispatch limit.
let (x_threads, workgroup_size_x) = workgroup_size(
output_lengths[0],
MAX_COMPUTE_WORKGROUPS_PER_DIMENSION,
MAX_WORKGROUP_SIZE_X,
)?;
context.insert("workgroup_size_x", &workgroup_size_x);
// Per-dimension chunk sizes of the input; currently only referenced from a
// commented-out line in the template — kept for the channel-stride case.
context.insert("i_chunks", &input_chunks);

NodeTemplate {
scalar_type: agreed_type(input_shapes, output_shapes)?,
template: "matrix/lrn.wgsl",
threads: (x_threads, 1, 1),
}
}
op => return Err(CompileError::UnimplementedOp(op.to_string())),
};

Expand Down
23 changes: 23 additions & 0 deletions wonnx/templates/matrix/lrn.wgsl
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
{%- include "structs.wgsl" -%}

@group(0) @binding(0)
var<storage, read> input_0: Array;

@group(0) @binding(1)
var<storage, read_write> output_0: Array;

// Local Response Normalization (ONNX LRN):
//   Y[c] = X[c] / (bias + (alpha / size) * sum(X[i]^2 over window))^beta
// where the window spans up to {{ left_size }} elements before and
// {{ right_size }} elements after c, clamped to the run of
// {{ i_shape[0][1] }} consecutive elements containing c.
//
// NOTE(review): neighbours are taken at stride 1 in the flattened buffer.
// For an NCHW input with H*W > 1, adjacent channels are i_chunks[0][1]
// elements apart, so this normalizes along the innermost axis rather than
// the channel axis — confirm the intended data layout.
@compute @workgroup_size({{ workgroup_size_x }})
fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
	let c = global_id.x;

	// First and last flat index of the run containing element c.
	let start = (c / {{ i_shape[0][1] }}u) * {{ i_shape[0][1] }}u;
	let end = start + {{ i_shape[0][1] - 1 }}u;

	// Lower edge of the window. Guard the subtraction: `c - left_size`
	// wraps around (u32) when c < left_size — with size >= 3 this produced
	// a huge start index, an empty loop, and a zero square-sum for the
	// first elements of the buffer.
	var window_start = start;
	if (c >= {{ left_size }}u && c - {{ left_size }}u > start) {
		window_start = c - {{ left_size }}u;
	}
	let window_end = min(end, c + {{ right_size }}u);

	var square_sum: Scalar = Scalar();
	for (var i = window_start; i <= window_end; i++) {
		let I = input_0.data[i];
		square_sum += I * I;
	}

	output_0.data[c] = input_0.data[c] / pow({{ scalar_type }}({{ bias }}) + ({{ scalar_type }}({{ alpha }}) / {{ scalar_type }}({{ size }})) * square_sum, {{ scalar_type }}({{ beta }}));
}
61 changes: 61 additions & 0 deletions wonnx/tests/localresponsenormalization.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
use std::{collections::HashMap, convert::TryInto};
use wonnx::utils::{attribute, graph, model, node, tensor};
mod common;

#[test]
fn local_response_normalization() {
    // One batch of a 4-channel, 3x3 image (NCHW layout).
    let batches = 1;
    let width_height: usize = 3;
    let channels: usize = 4;

    let data: Vec<f32> = vec![
        1., 1., 2., 4., 2., 2., 1., 2., 3., 1., 2., 1., 4., 2., 3., 5., 3., 3., 2., 2., 6., 2., 3.,
        1., 7., 3., 4., 2., 8., 4., 3., 2., 9., 3., 4., 4.,
    ];
    let shape = vec![
        batches as i64,
        channels as i64,
        width_height as i64,
        width_height as i64,
    ];

    let mut input_data = HashMap::new();
    input_data.insert("X".to_string(), data.as_slice().into());

    // Single-node graph: Y = LRN(X) with size=2 and simple attribute values
    // (alpha=1, beta=1, bias=0) so the expected outputs are easy to derive.
    let lrn_model = model(graph(
        vec![tensor("X", &shape)], // inputs
        vec![tensor("Y", &shape)], // outputs
        vec![],                    // infos
        vec![],                    // initializers
        vec![node(
            vec!["X"],
            vec!["Y"],
            "lrn",
            "LRN",
            vec![
                attribute("alpha", 1.0),
                attribute("beta", 1.0),
                attribute("bias", 0.0),
                attribute("size", 2),
            ],
        )],
    ));

    let session = pollster::block_on(wonnx::Session::from_model(lrn_model))
        .expect("Session did not create");
    let result = pollster::block_on(session.run(&input_data)).unwrap();

    common::assert_eq_vector(
        (&result["Y"]).try_into().unwrap(),
        &[
            1.0, 0.4, 0.2, 0.5, 0.5, 0.8, 0.4, 1.0, 0.6, 0.4, 0.8, 2.0, 0.4, 0.30769232, 0.1764706,
            0.39999998, 0.33333334, 0.4615385, 0.5, 1.0, 0.3, 0.30769232, 0.6, 2.0, 0.2413793,
            0.24, 0.4, 1.0, 0.2, 0.32, 0.4615385, 1.0, 0.2, 0.24, 0.25, 0.5,
        ],
    );
}