Fix a memory leak issue.
shono committed Jan 14, 2021
1 parent e5fd060 commit f9c6003
Showing 3 changed files with 175 additions and 144 deletions.
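The fix follows one pattern in both files: tensor work that stays synchronous is wrapped in tf.tidy() so intermediate tensors are released automatically, and tensors that must outlive a tidy scope (the model inputs, the raw outputs, and the post-processing results) are released explicitly with tf.dispose() once their values have been read back as plain arrays. The readable version of the change is in lib/index.umd.js below; the lib/index.js hunks are its transpiled equivalent. As orientation only, a minimal sketch of the pattern (preprocess and runInference are illustrative placeholders, not the library's API):

const tf = require('@tensorflow/tfjs');

// Stand-in for the library's _preprocess(): add a batch dimension and cast.
const preprocess = (t) => t.expandDims(0).toFloat();

async function runInference(model, pixels) {
  // tidy() frees every tensor created inside the callback except the one returned,
  // so the intermediates from fromPixels()/preprocess() cannot leak.
  const inputs = tf.tidy(() =>
    pixels instanceof tf.Tensor ? pixels : preprocess(tf.browser.fromPixels(pixels, 3)));

  const outputs = await model.executeAsync(inputs);
  tf.dispose(inputs); // the input tensor is no longer needed once inference has run

  const arrays = await (Array.isArray(outputs)
    ? Promise.all(outputs.map((t) => t.array()))
    : outputs.array());
  tf.dispose(outputs); // raw output tensors are freed after copying their values out
  return arrays;       // plain JS arrays; no tensors remain allocated by this call
}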
139 changes: 75 additions & 64 deletions lib/index.js
@@ -55,7 +55,7 @@ var Model = /*#__PURE__*/function () {
this.model = _context.sent;
this.input_size = this.model.inputs[0].shape[1];
this.is_new_od_model = this.model.outputs.length == 3;
this.is_rgb_input = this.model.metadata['Image.BitmapPixelFormat'] == 'Rgb8' || this.is_new_od_model;
this.is_rgb_input = this.model.metadata && this.model.metadata['Image.BitmapPixelFormat'] == 'Rgb8' || this.is_new_od_model;

case 7:
case "end":
@@ -80,33 +80,33 @@ var Model = /*#__PURE__*/function () {
key: "executeAsync",
value: function () {
var _executeAsync = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee2(pixels) {
var _this = this;

var inputs, outputs, arrays;
return _regenerator.default.wrap(function _callee2$(_context2) {
while (1) {
switch (_context2.prev = _context2.next) {
case 0:
inputs = pixels instanceof tf.Tensor ? pixels : this._preprocess(tf.browser.fromPixels(pixels, 3));
inputs = tf.tidy(function () {
return pixels instanceof tf.Tensor ? pixels : _this._preprocess(tf.browser.fromPixels(pixels, 3));
});
_context2.next = 3;
return this.model.executeAsync(inputs, this.is_new_od_model ? this.NEW_OD_OUTPUT_TENSORS : null);

case 3:
outputs = _context2.sent;
arrays = !Array.isArray(outputs) ? outputs.array() : Promise.all(outputs.map(function (t) {
tf.dispose(inputs);
_context2.next = 7;
return !Array.isArray(outputs) ? outputs.array() : Promise.all(outputs.map(function (t) {
return t.array();
}));
_context2.t0 = this;
_context2.next = 8;
return arrays;

case 8:
_context2.t1 = _context2.sent;
_context2.next = 11;
return _context2.t0._postprocess.call(_context2.t0, _context2.t1);

case 11:
return _context2.abrupt("return", _context2.sent);
case 7:
arrays = _context2.sent;
tf.dispose(outputs);
return _context2.abrupt("return", this._postprocess(arrays));

case 12:
case 10:
case "end":
return _context2.stop();
}
@@ -154,12 +154,12 @@ var ObjectDetectionModel = /*#__PURE__*/function (_Model) {
var _super = _createSuper(ObjectDetectionModel);

function ObjectDetectionModel() {
var _this;
var _this2;

(0, _classCallCheck2.default)(this, ObjectDetectionModel);
_this = _super.call(this);
_this.ANCHORS = [0.573, 0.677, 1.87, 2.06, 3.34, 5.47, 7.88, 3.53, 9.77, 9.17];
return _this;
_this2 = _super.call(this);
_this2.ANCHORS = [0.573, 0.677, 1.87, 2.06, 3.34, 5.47, 7.88, 3.53, 9.77, 9.17];
return _this2;
}

(0, _createClass2.default)(ObjectDetectionModel, [{
@@ -172,7 +172,10 @@ var ObjectDetectionModel = /*#__PURE__*/function (_Model) {
key: "_postprocess",
value: function () {
var _postprocess3 = (0, _asyncToGenerator2.default)( /*#__PURE__*/_regenerator.default.mark(function _callee4(outputs) {
var num_anchor, channels, height, width, num_class, boxes, scores, classes, grid_y, grid_x, offset, i, x, y, w, h, objectness, class_probabilities, max_index, selected_indices;
var _this3 = this;

var _tf$tidy, _tf$tidy2, boxes, scores, classes, selected_indices, tensor_results, results;

return _regenerator.default.wrap(function _callee4$(_context4) {
while (1) {
switch (_context4.prev = _context4.next) {
@@ -185,68 +188,76 @@ var ObjectDetectionModel = /*#__PURE__*/function (_Model) {
return _context4.abrupt("return", outputs);

case 2:
// TODO: Need more efficient implmentation
num_anchor = this.ANCHORS.length / 2;
channels = outputs[0][0][0].length;
height = outputs[0].length;
width = outputs[0][0].length;
num_class = channels / num_anchor - 5;
boxes = [];
scores = [];
classes = [];

for (grid_y = 0; grid_y < height; grid_y++) {
for (grid_x = 0; grid_x < width; grid_x++) {
offset = 0;

for (i = 0; i < num_anchor; i++) {
x = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_x) / width;
y = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_y) / height;
w = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2] / width;
h = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2 + 1] / height;
objectness = tf.scalar(this._logistic(outputs[0][grid_y][grid_x][offset++]));
class_probabilities = tf.tensor1d(outputs[0][grid_y][grid_x].slice(offset, offset + num_class)).softmax();
offset += num_class;
class_probabilities = class_probabilities.mul(objectness);
max_index = class_probabilities.argMax();
boxes.push([x - w / 2, y - h / 2, x + w / 2, y + h / 2]);
scores.push(class_probabilities.max().dataSync()[0]);
classes.push(max_index.dataSync()[0]);
_tf$tidy = tf.tidy(function () {
// TODO: Need more efficient implmentation
var num_anchor = _this3.ANCHORS.length / 2;
var channels = outputs[0][0][0].length;
var height = outputs[0].length;
var width = outputs[0][0].length;
var num_class = channels / num_anchor - 5;
var boxes = [];
var scores = [];
var classes = [];

for (var grid_y = 0; grid_y < height; grid_y++) {
for (var grid_x = 0; grid_x < width; grid_x++) {
var offset = 0;

for (var i = 0; i < num_anchor; i++) {
var x = (_this3._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_x) / width;
var y = (_this3._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_y) / height;
var w = Math.exp(outputs[0][grid_y][grid_x][offset++]) * _this3.ANCHORS[i * 2] / width;
var h = Math.exp(outputs[0][grid_y][grid_x][offset++]) * _this3.ANCHORS[i * 2 + 1] / height;
var objectness = tf.scalar(_this3._logistic(outputs[0][grid_y][grid_x][offset++]));
var class_probabilities = tf.tensor1d(outputs[0][grid_y][grid_x].slice(offset, offset + num_class)).softmax();
offset += num_class;
class_probabilities = class_probabilities.mul(objectness);
var max_index = class_probabilities.argMax();
boxes.push([x - w / 2, y - h / 2, x + w / 2, y + h / 2]);
scores.push(class_probabilities.max().dataSync()[0]);
classes.push(max_index.dataSync()[0]);
}
}
}
}

boxes = tf.tensor2d(boxes);
scores = tf.tensor1d(scores);
classes = tf.tensor1d(classes);
_context4.next = 16;
boxes = tf.tensor2d(boxes);
scores = tf.tensor1d(scores);
classes = tf.tensor1d(classes);
return [boxes, scores, classes];
}), _tf$tidy2 = (0, _slicedToArray2.default)(_tf$tidy, 3), boxes = _tf$tidy2[0], scores = _tf$tidy2[1], classes = _tf$tidy2[2];
_context4.next = 5;
return tf.image.nonMaxSuppressionAsync(boxes, scores, 10);

case 16:
case 5:
selected_indices = _context4.sent;
_context4.next = 19;
return boxes.gather(selected_indices).array();
tensor_results = [boxes.gather(selected_indices), scores.gather(selected_indices), classes.gather(selected_indices)];
_context4.next = 9;
return tensor_results[0].array();

case 19:
case 9:
_context4.t0 = _context4.sent;
_context4.next = 22;
return scores.gather(selected_indices).array();
_context4.next = 12;
return tensor_results[1].array();

case 22:
case 12:
_context4.t1 = _context4.sent;
_context4.next = 25;
return classes.gather(selected_indices).array();
_context4.next = 15;
return tensor_results[2].array();

case 25:
case 15:
_context4.t2 = _context4.sent;
return _context4.abrupt("return", [_context4.t0, _context4.t1, _context4.t2]);
results = [_context4.t0, _context4.t1, _context4.t2];
tf.dispose([boxes, scores, classes]);
tf.dispose(selected_indices);
tf.dispose(tensor_results);
return _context4.abrupt("return", results);

case 27:
case 21:
case "end":
return _context4.stop();
}
}
}, _callee4, this);
}, _callee4);
}));

function _postprocess(_x4) {
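One detail worth noting in the _postprocess hunk above (it appears in readable form in lib/index.umd.js below): tensors that tf.tidy() returns intentionally survive the tidy scope, so boxes, scores, and classes still have to be disposed by hand after nonMaxSuppressionAsync, together with the gathered result tensors, once their values have been copied out with .array(). A minimal sketch of that shape, using placeholder box data rather than the model's real outputs:

const tf = require('@tensorflow/tfjs');

async function selectBoxes() {
  // tidy() frees its intermediates but keeps the tensors returned from it,
  // so those returned tensors must be disposed manually later.
  const [boxes, scores] = tf.tidy(() => {
    const rawBoxes = [[0.10, 0.10, 0.40, 0.40], [0.12, 0.10, 0.42, 0.40]]; // placeholder values
    const rawScores = [0.9, 0.8];
    return [tf.tensor2d(rawBoxes), tf.tensor1d(rawScores)];
  });

  const selected = await tf.image.nonMaxSuppressionAsync(boxes, scores, 10);
  const picked = boxes.gather(selected);
  const result = await picked.array(); // copy the surviving values off the backend

  // Everything that escaped tidy() or was created after it is released here.
  tf.dispose([boxes, scores, selected, picked]);
  return result;
}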
90 changes: 50 additions & 40 deletions lib/index.umd.js
@@ -13,18 +13,20 @@
this.model = await tf.loadGraphModel(modelUrl, options);
this.input_size = this.model.inputs[0].shape[1];
this.is_new_od_model = this.model.outputs.length == 3;
this.is_rgb_input = this.model.metadata['Image.BitmapPixelFormat'] == 'Rgb8' || this.is_new_od_model;
this.is_rgb_input = (this.model.metadata && this.model.metadata['Image.BitmapPixelFormat'] == 'Rgb8') || this.is_new_od_model;
}

dispose() {
this.model.dispose();
}

async executeAsync(pixels) {
const inputs = pixels instanceof tf.Tensor ? pixels : this._preprocess(tf.browser.fromPixels(pixels, 3));
const inputs = tf.tidy(() => { return pixels instanceof tf.Tensor ? pixels : this._preprocess(tf.browser.fromPixels(pixels, 3)); });
const outputs = await this.model.executeAsync(inputs, this.is_new_od_model ? this.NEW_OD_OUTPUT_TENSORS : null);
const arrays = !Array.isArray(outputs) ? outputs.array() : Promise.all(outputs.map(t => t.array()));
return await this._postprocess(await arrays);
tf.dispose(inputs);
const arrays = await (!Array.isArray(outputs) ? outputs.array() : Promise.all(outputs.map(t => t.array())));
tf.dispose(outputs);
return this._postprocess(arrays);
}

async _postprocess(outputs) {
@@ -48,47 +50,55 @@
return outputs; // New model doesn't need post processing.
}

// TODO: Need more efficient implmentation
const num_anchor = this.ANCHORS.length / 2;
const channels = outputs[0][0][0].length;
const height = outputs[0].length;
const width = outputs[0][0].length;

const num_class = channels / num_anchor - 5;

let boxes = [];
let scores = [];
let classes = [];

for (var grid_y = 0; grid_y < height; grid_y++) {
for (var grid_x = 0; grid_x < width; grid_x++) {
let offset = 0;

for (var i = 0; i < num_anchor; i++) {
let x = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_x) / width;
let y = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_y) / height;
let w = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2] / width;
let h = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2 + 1] / height;

let objectness = tf.scalar(this._logistic(outputs[0][grid_y][grid_x][offset++]));
let class_probabilities = tf.tensor1d(outputs[0][grid_y][grid_x].slice(offset, offset + num_class)).softmax();
offset += num_class;

class_probabilities = class_probabilities.mul(objectness);
let max_index = class_probabilities.argMax();
boxes.push([x - w / 2, y - h / 2, x + w / 2, y + h / 2]);
scores.push(class_probabilities.max().dataSync()[0]);
classes.push(max_index.dataSync()[0]);
const [boxes, scores, classes] = tf.tidy(() => {
// TODO: Need more efficient implmentation
const num_anchor = this.ANCHORS.length / 2;
const channels = outputs[0][0][0].length;
const height = outputs[0].length;
const width = outputs[0][0].length;

const num_class = channels / num_anchor - 5;

let boxes = [];
let scores = [];
let classes = [];

for (var grid_y = 0; grid_y < height; grid_y++) {
for (var grid_x = 0; grid_x < width; grid_x++) {
let offset = 0;

for (var i = 0; i < num_anchor; i++) {
let x = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_x) / width;
let y = (this._logistic(outputs[0][grid_y][grid_x][offset++]) + grid_y) / height;
let w = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2] / width;
let h = Math.exp(outputs[0][grid_y][grid_x][offset++]) * this.ANCHORS[i * 2 + 1] / height;

let objectness = tf.scalar(this._logistic(outputs[0][grid_y][grid_x][offset++]));
let class_probabilities = tf.tensor1d(outputs[0][grid_y][grid_x].slice(offset, offset + num_class)).softmax();
offset += num_class;

class_probabilities = class_probabilities.mul(objectness);
let max_index = class_probabilities.argMax();
boxes.push([x - w / 2, y - h / 2, x + w / 2, y + h / 2]);
scores.push(class_probabilities.max().dataSync()[0]);
classes.push(max_index.dataSync()[0]);
}
}
}
}
boxes = tf.tensor2d(boxes);
scores = tf.tensor1d(scores);
classes = tf.tensor1d(classes);

boxes = tf.tensor2d(boxes);
scores = tf.tensor1d(scores);
classes = tf.tensor1d(classes);
return [boxes, scores, classes];
});

const selected_indices = await tf.image.nonMaxSuppressionAsync(boxes, scores, 10);
return [await boxes.gather(selected_indices).array(), await scores.gather(selected_indices).array(), await classes.gather(selected_indices).array()];
const tensor_results = [boxes.gather(selected_indices), scores.gather(selected_indices), classes.gather(selected_indices)];
const results = [await tensor_results[0].array(), await tensor_results[1].array(), await tensor_results[2].array()];
tf.dispose([boxes, scores, classes]);
tf.dispose(selected_indices);
tf.dispose(tensor_results);
return results;
}

_logistic(x) {
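A simple way to confirm a fix like this (an illustrative check, not part of the commit) is to watch tf.memory().numTensors across repeated calls: with the leak, the live tensor count grows on every executeAsync() call; with the fix it should settle after the first call.

const tf = require('@tensorflow/tfjs');

// Hypothetical leak check: run inference repeatedly and log the live tensor count.
async function checkForLeak(model, imageElement, iterations = 5) {
  for (let i = 0; i < iterations; i++) {
    await model.executeAsync(imageElement);
    console.log(`after call ${i + 1}: ${tf.memory().numTensors} tensors`);
  }
}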
