diff --git a/README.md b/README.md
index 1aa26fe..5b358ef 100644
--- a/README.md
+++ b/README.md
@@ -26,9 +26,9 @@ The model in Keras is built by the essay author, see [sergiomsilva/alpr-unconstr
 | Confidence |
- 0.9705 |
- 0.9828 |
- 0.9987 |
+ 0.9841 |
+ 0.9945 |
+ 0.9979 |
diff --git a/docs/sample/annotated/03009.jpg b/docs/sample/annotated/03009.jpg
index 9bb789e..8edb56e 100644
Binary files a/docs/sample/annotated/03009.jpg and b/docs/sample/annotated/03009.jpg differ
diff --git a/docs/sample/annotated/03016.jpg b/docs/sample/annotated/03016.jpg
index cf95957..7c56c47 100644
Binary files a/docs/sample/annotated/03016.jpg and b/docs/sample/annotated/03016.jpg differ
diff --git a/docs/sample/annotated/03025.jpg b/docs/sample/annotated/03025.jpg
index e0021a2..b94d007 100644
Binary files a/docs/sample/annotated/03025.jpg and b/docs/sample/annotated/03025.jpg differ
diff --git a/docs/sample/warped/03009.jpg b/docs/sample/warped/03009.jpg
index 09a6871..578a35f 100644
Binary files a/docs/sample/warped/03009.jpg and b/docs/sample/warped/03009.jpg differ
diff --git a/docs/sample/warped/03016.jpg b/docs/sample/warped/03016.jpg
index 0c63f1b..0701f37 100644
Binary files a/docs/sample/warped/03016.jpg and b/docs/sample/warped/03016.jpg differ
diff --git a/docs/sample/warped/03025.jpg b/docs/sample/warped/03025.jpg
index 478788c..d729da1 100644
Binary files a/docs/sample/warped/03025.jpg and b/docs/sample/warped/03025.jpg differ
diff --git a/wpodnet/backend.py b/wpodnet/backend.py
index 6eabce5..f7b73ee 100644
--- a/wpodnet/backend.py
+++ b/wpodnet/backend.py
@@ -45,6 +45,7 @@ class Predictor:
         [1., 1., 1., 1.]
     ])
     _scaling_const = 7.75
+    _stride = 16
 
     def __init__(self, wpodnet: WPODNet):
         self.wpodnet = wpodnet
@@ -55,11 +56,15 @@ def _resize_to_fixed_ratio(self, image: Image.Image) -> Image.Image:
         wh_ratio = max(h, w) / min(h, w)
         side = int(wh_ratio * 288)
-        bound_dim = min(side + side % 16, 608)
+        bound_dim = min(side + side % self._stride, 608)
         factor = bound_dim / min(h, w)
         reg_w, reg_h = int(w * factor), int(h * factor)
+        # Ensure both the width and height are multiples of `self._stride`
+        reg_w += self._stride - reg_w % self._stride
+        reg_h += self._stride - reg_h % self._stride
+
         return image.resize((reg_w, reg_h))
 
     def _to_torch_image(self, image: Image.Image) -> torch.Tensor:
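
For context, the sketch below (not part of this patch) reproduces the resize math after the change, assuming the stride of 16 introduced as `Predictor._stride`; the helper name `aligned_resize_dims` and the 1280x720 example are illustrative only.

```python
# Illustrative sketch, not repository code: mirrors the dimension math of the
# patched `_resize_to_fixed_ratio`, assuming the stride of 16 added above.
STRIDE = 16


def aligned_resize_dims(w: int, h: int, stride: int = STRIDE) -> tuple:
    wh_ratio = max(h, w) / min(h, w)
    side = int(wh_ratio * 288)
    bound_dim = min(side + side % stride, 608)

    factor = bound_dim / min(h, w)
    reg_w, reg_h = int(w * factor), int(h * factor)

    # Bump both dimensions so they end up as multiples of the stride
    # (a full stride is added when a dimension is already aligned),
    # matching the lines added in this diff.
    reg_w += stride - reg_w % stride
    reg_h += stride - reg_h % stride
    return reg_w, reg_h


if __name__ == '__main__':
    # Hypothetical 1280x720 input: both returned sides are divisible by 16.
    print(aligned_resize_dims(1280, 720))
```

Aligning both sides to the stride presumably keeps the downsampled WPODNet feature map consistent with the resized input, which is what the added lines enforce.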