diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..8bd0b28
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+*.pyc
+*.swp
+*.pkl
+*.pth
+result*
+weights*
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b4be93e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,83 @@
+## CRAFT: Character-Region Awareness For Text detection
+Official PyTorch implementation of the CRAFT text detector | [Paper](https://arxiv.org/abs/1904.01941) | [Pretrained Model](https://drive.google.com/open?id=1Jk4eGD7crsqCCg9C9VjCLkMN3ze8kutZ) | [Supplementary](https://youtu.be/HI8MzpY8KMI)
+
+**[Youngmin Baek](mailto:youngmin.baek@navercorp.com), Bado Lee, Dongyoon Han, Sangdoo Yun, Hwalsuk Lee.**
+
+Clova AI Research, NAVER Corp.
+
+### Sample Results
+
+### Overview
+PyTorch implementation of the CRAFT text detector, which detects text regions by exploring each character region and the affinity between characters. Bounding boxes of text are obtained by finding minimum bounding rectangles on the binary map produced by thresholding the character-region and affinity scores.
+
+teaser
+
+## Updates
+**4 Jun, 2019**: Initial update
+
+
+## Getting started
+### Install dependencies
+#### Requirements
+- PyTorch>=0.4.1
+- torchvision>=0.2.1
+- opencv-python>=3.4.2
+- check requirements.txt
+```
+pip install -r requirements.txt
+```
+
+### Training
+We are currently cleaning up the training code for release.
+
+### Test instruction using pretrained model
+- Download [Trained Model on IC13,IC17](https://drive.google.com/open?id=1Jk4eGD7crsqCCg9C9VjCLkMN3ze8kutZ)
+* Run with pretrained model
+``` (with python 3.7)
+python test.py --trained_model=[weightfile] --test_folder=[folder path to test images]
+```
+
+The result images and score maps will be saved to `./result` by default.
+
+### Arguments
+* `--trained_model`: pretrained model
+* `--text_threshold`: text confidence threshold
+* `--low_text`: text low-bound score
+* `--link_threshold`: link confidence threshold
+* `--canvas_size`: max image size for inference
+* `--mag_ratio`: image magnification ratio
+* `--show_time`: show processing time
+* `--test_folder`: folder path to input images
+
+## Citation
+```
+@article{baek2019character,
+  title={Character Region Awareness for Text Detection},
+  author={Baek, Youngmin and Lee, Bado and Han, Dongyoon and Yun, Sangdoo and Lee, Hwalsuk},
+  journal={arXiv preprint arXiv:1904.01941},
+  year={2019}
+}
+```
+
+## License
+```
+Copyright (c) 2019-present NAVER Corp.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+```
diff --git a/basenet/__init__.py b/basenet/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/basenet/vgg16_bn.py b/basenet/vgg16_bn.py
new file mode 100644
index 0000000..f3f21a7
--- /dev/null
+++ b/basenet/vgg16_bn.py
@@ -0,0 +1,73 @@
+from collections import namedtuple
+
+import torch
+import torch.nn as nn
+import torch.nn.init as init
+from torchvision import models
+from torchvision.models.vgg import model_urls
+
+def init_weights(modules):
+    for m in modules:
+        if isinstance(m, nn.Conv2d):
+            init.xavier_uniform_(m.weight.data)
+            if m.bias is not None:
+                m.bias.data.zero_()
+        elif isinstance(m, nn.BatchNorm2d):
+            m.weight.data.fill_(1)
+            m.bias.data.zero_()
+        elif isinstance(m, nn.Linear):
+            m.weight.data.normal_(0, 0.01)
+            m.bias.data.zero_()
+
+class vgg16_bn(torch.nn.Module):
+    def __init__(self, pretrained=True, freeze=True):
+        super(vgg16_bn, self).__init__()
+        model_urls['vgg16_bn'] = model_urls['vgg16_bn'].replace('https://', 'http://')
+        vgg_pretrained_features = models.vgg16_bn(pretrained=pretrained).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        for x in range(12):         # conv2_2
+            self.slice1.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(12, 19):     # conv3_3
+            self.slice2.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(19, 29):     # conv4_3
+            self.slice3.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(29, 39):     # conv5_3
+            self.slice4.add_module(str(x), vgg_pretrained_features[x])
+
+        # fc6, fc7 without atrous conv
+        self.slice5 = torch.nn.Sequential(
+            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
+            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6),
+            nn.Conv2d(1024, 1024, kernel_size=1)
+        )
+
+        if not pretrained:
+            init_weights(self.slice1.modules())
+            init_weights(self.slice2.modules())
+            init_weights(self.slice3.modules())
+            init_weights(self.slice4.modules())
+
+        init_weights(self.slice5.modules())    # no pretrained model for fc6 and fc7
+
+        if freeze:
+            for param in self.slice1.parameters():    # only first conv
+                param.requires_grad = False
+
+    def forward(self, X):
+        h = self.slice1(X)
+        h_relu2_2 = h
+        h = self.slice2(h)
+        h_relu3_2 = h
+        h = self.slice3(h)
+        h_relu4_3 = h
+        h = self.slice4(h)
+        h_relu5_3 = h
+        h = self.slice5(h)
+        h_fc7 = h
+        vgg_outputs = namedtuple("VggOutputs", ['fc7', 'relu5_3', 'relu4_3', 'relu3_2', 'relu2_2'])
+        out = vgg_outputs(h_fc7, h_relu5_3, h_relu4_3, h_relu3_2, h_relu2_2)
+        return out
diff --git a/craft.py b/craft.py
new file mode 100755
index 0000000..b8fc352
--- /dev/null
+++ b/craft.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from basenet.vgg16_bn import vgg16_bn, init_weights
+
+class double_conv(nn.Module):
+    def __init__(self, in_ch, mid_ch, out_ch):
+        super(double_conv, self).__init__()
+        self.conv = nn.Sequential(
+            nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),
+            nn.BatchNorm2d(mid_ch),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),
+            nn.BatchNorm2d(out_ch),
+            nn.ReLU(inplace=True)
+        )
+
+    def forward(self, x):
+        x = self.conv(x)
+        return x
+
+class CRAFT(nn.Module):
+    def __init__(self, pretrained=False, freeze=False):
+        super(CRAFT, self).__init__()
+
+        """ Base network """
+        self.basenet = vgg16_bn(pretrained, freeze)
+
+        """ U network """
+        self.upconv1 = double_conv(1024, 512, 256)
+        self.upconv2 = double_conv(512, 256, 128)
+        self.upconv3 = double_conv(256, 128, 64)
+        self.upconv4 = double_conv(128, 64, 32)
+
+        num_class = 2
+        self.conv_cls = nn.Sequential(
+            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(32, 16, kernel_size=3, padding=1), nn.ReLU(inplace=True),
+            nn.Conv2d(16, 16, kernel_size=1), nn.ReLU(inplace=True),
+            nn.Conv2d(16, num_class, kernel_size=1),
+        )
+
+        init_weights(self.upconv1.modules())
+        init_weights(self.upconv2.modules())
+        init_weights(self.upconv3.modules())
+        init_weights(self.upconv4.modules())
+        init_weights(self.conv_cls.modules())
+
+    def forward(self, x):
+        """ Base network """
+        sources = self.basenet(x)
+
+        """ U network """
+        y = torch.cat([sources[0], sources[1]], dim=1)
+        y = self.upconv1(y)
+
+        y = F.interpolate(y, size=sources[2].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[2]], dim=1)
+        y = self.upconv2(y)
+
+        y = F.interpolate(y, size=sources[3].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[3]], dim=1)
+        y = self.upconv3(y)
+
+        y = F.interpolate(y, size=sources[4].size()[2:], mode='bilinear', align_corners=False)
+        y = torch.cat([y, sources[4]], dim=1)
+        feature = self.upconv4(y)
+
+        y = self.conv_cls(feature)
+
+        return y.permute(0,2,3,1), feature
+
+if __name__ == '__main__':
+    model = CRAFT(pretrained=True).cuda()
+    output, _ = model(torch.randn(1, 3, 768, 768).cuda())
+    print(output.shape)
\ No newline at end of file
diff --git a/craft_utils.py b/craft_utils.py
new file mode 100755
index 0000000..6f32346
--- /dev/null
+++ b/craft_utils.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import cv2
+import math
+
+
+def getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text):
+    # prepare data
+    linkmap = linkmap.copy()
+    textmap = textmap.copy()
+    img_h, img_w = textmap.shape
+
+    """ labeling method """
+    ret, text_score = cv2.threshold(textmap, low_text, 1, 0)
+    ret, link_score = cv2.threshold(linkmap, link_threshold, 1, 0)
+
+    text_score_comb = np.clip(text_score + link_score, 0, 1)
+    nLabels, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8), connectivity=4)
+
+    det = []
+    mapper = []
+    for k in range(1,nLabels):
+        # size filtering
+        size = stats[k, cv2.CC_STAT_AREA]
+        if size < 10: continue
+
+        # thresholding
+        if np.max(textmap[labels==k]) < text_threshold: continue
+
+        # make segmentation map
+        segmap = np.zeros(textmap.shape, dtype=np.uint8)
+        segmap[labels==k] = 255
+        segmap[np.logical_and(link_score==1, text_score==0)] = 0   # remove link area
+        x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
+        w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
+        niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
+        sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
+        # boundary check
+        if sx < 0 : sx = 0
+        if sy < 0 : sy = 0
+        if ex >= img_w: ex = img_w
+        if ey >= img_h: ey = img_h
+        kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1 + niter, 1 + niter))
+        segmap[sy:ey, sx:ex] = cv2.dilate(segmap[sy:ey, sx:ex], kernel)
+
+        # make box
+        np_contours = np.roll(np.array(np.where(segmap!=0)),1,axis=0).transpose().reshape(-1,2)
+        rectangle = cv2.minAreaRect(np_contours)
+        box = cv2.boxPoints(rectangle)
+
+        # align diamond-shape
+        w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
+        box_ratio = max(w, h) / (min(w, h) + 1e-5)
+        if abs(1 - box_ratio) <= 0.1:
+            l, r = min(np_contours[:,0]), max(np_contours[:,0])
+            t, b = min(np_contours[:,1]), max(np_contours[:,1])
+            box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)
+
+        # make clock-wise order
+        startidx = box.sum(axis=1).argmin()
+        box = np.roll(box, 4-startidx, 0)
+        box = np.array(box)
+
+        det.append(box)
+        mapper.append(k)
+
+    return det, labels, mapper
+
+
+def getDetBoxes(textmap, linkmap, text_threshold, link_threshold, low_text):
+    boxes, labels, mapper = getDetBoxes_core(textmap, linkmap, text_threshold, link_threshold, low_text)
+
+    return boxes
+
+def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net = 2):
+    if len(polys) > 0:
+        polys = np.array(polys)
+        for k in range(len(polys)):
+            if polys[k] is not None:
+                polys[k] *= (ratio_w * ratio_net, ratio_h * ratio_net)
+    return polys
diff --git a/figures/craft_example.gif b/figures/craft_example.gif
new file mode 100644
index 0000000..65eb35b
Binary files /dev/null and b/figures/craft_example.gif differ
diff --git a/file_utils.py b/file_utils.py
new file mode 100644
index 0000000..94ab040
--- /dev/null
+++ b/file_utils.py
@@ -0,0 +1,76 @@
+# -*- coding: utf-8 -*-
+import os
+import numpy as np
+import cv2
+import imgproc
+
+# borrowed from https://github.com/lengstrom/fast-style-transfer/blob/master/src/utils.py
+def get_files(img_dir):
+    imgs, masks, xmls = list_files(img_dir)
+    return imgs, masks, xmls
+
+def list_files(in_path):
+    img_files = []
+    mask_files = []
+    gt_files = []
+    for (dirpath, dirnames, filenames) in os.walk(in_path):
+        for file in filenames:
+            filename, ext = os.path.splitext(file)
+            ext = str.lower(ext)
+            if ext == '.jpg' or ext == '.jpeg' or ext == '.gif' or ext == '.png' or ext == '.pgm':
+                img_files.append(os.path.join(dirpath, file))
+            elif ext == '.bmp':
+                mask_files.append(os.path.join(dirpath, file))
+            elif ext == '.xml' or ext == '.gt' or ext == '.txt':
+                gt_files.append(os.path.join(dirpath, file))
+            elif ext == '.zip':
+                continue
+    # img_files.sort()
+    # mask_files.sort()
+    # gt_files.sort()
+    return img_files, mask_files, gt_files
+
+def saveResult(img_file, img, boxes, dirname='./result/', verticals=None, texts=None):
+    """ save text detection result one by one
+    Args:
+        img_file (str): image file name
+        img (array): raw image context
+        boxes (array): array of result file
+            Shape: [num_detections, 4] for BB output / [num_detections, 4] for QUAD output
+    Return:
+        None
+    """
+    img = np.array(img)
+
+    # make result file list
+    filename, file_ext = os.path.splitext(os.path.basename(img_file))
+
+    # result directory
+    res_file = dirname + "res_" + filename + '.txt'
+    res_img_file = dirname + "res_" + filename + '.jpg'
+
+    if not os.path.isdir(dirname):
+        os.mkdir(dirname)
+
+    with open(res_file, 'w') as f:
+        for i, box in enumerate(boxes):
+            poly = np.array(box).astype(np.int32).reshape((-1))
+            strResult = ','.join([str(p) for p in poly]) + '\r\n'
+            f.write(strResult)
+
+            poly = poly.reshape(-1, 2)
+            cv2.polylines(img, [poly.reshape((-1, 1, 2))], True, color=(0, 0, 255), thickness=2)
+            ptColor = (0, 255, 255)
+            if verticals is not None:
+                if verticals[i]:
+                    ptColor = (255, 0, 0)
+
+            if texts is not None:
+                font = cv2.FONT_HERSHEY_SIMPLEX
+                font_scale = 0.5
+                cv2.putText(img, "{}".format(texts[i]), (poly[0][0]+1, poly[0][1]+1), font, font_scale, (0, 0, 0), thickness=1)
+                cv2.putText(img, "{}".format(texts[i]), tuple(poly[0]), font, font_scale, (0, 255, 255), thickness=1)
+
+    # Save result image
+    cv2.imwrite(res_img_file, img)
+
diff --git a/imgproc.py b/imgproc.py
new file mode 100644
index 0000000..fe882ab
--- /dev/null
+++ b/imgproc.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+from skimage import io
+import cv2
+
+def loadImage(img_file):
+    img = io.imread(img_file)           # RGB order
+    if img.shape[0] == 2: img = img[0]
+    if len(img.shape) == 2 : img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+    if img.shape[2] == 4: img = img[:,:,:3]
+    img = np.array(img)
+
+    return img
+
+def normalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
+    # should be RGB order
+    img = in_img.copy().astype(np.float32)
+
+    img -= np.array([mean[0] * 255.0, mean[1] * 255.0, mean[2] * 255.0], dtype=np.float32)
+    img /= np.array([variance[0] * 255.0, variance[1] * 255.0, variance[2] * 255.0], dtype=np.float32)
+    return img
+
+def denormalizeMeanVariance(in_img, mean=(0.485, 0.456, 0.406), variance=(0.229, 0.224, 0.225)):
+    # should be RGB order
+    img = in_img.copy()
+    img *= variance
+    img += mean
+    img *= 255.0
+    img = np.clip(img, 0, 255).astype(np.uint8)
+    return img
+
+def resize_aspect_ratio(img, square_size, interpolation, mag_ratio=1):
+    height, width, channel = img.shape
+
+    # magnify image size
+    target_size = mag_ratio * max(height, width)
+
+    # set original image size
+    if target_size > square_size:
+        target_size = square_size
+
+    ratio = target_size / max(height, width)
+
+    target_h, target_w = int(height * ratio), int(width * ratio)
+    proc = cv2.resize(img, (target_w, target_h), interpolation = interpolation)
+
+
+    # make canvas and paste image
+    target_h32, target_w32 = target_h, target_w
+    if target_h % 32 != 0:
+        target_h32 = target_h + (32 - target_h % 32)
+    if target_w % 32 != 0:
+        target_w32 = target_w + (32 - target_w % 32)
+    resized = np.zeros((target_h32, target_w32, channel), dtype=np.float32)
+    resized[0:target_h, 0:target_w, :] = proc
+    target_h, target_w = target_h32, target_w32
+
+    size_heatmap = (int(target_w/2), int(target_h/2))
+
+    return resized, ratio, size_heatmap
+
+def cvt2HeatmapImg(img):
+    img = (np.clip(img, 0, 1) * 255).astype(np.uint8)
+    img = cv2.applyColorMap(img, cv2.COLORMAP_JET)
+    return img
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..f4b2412
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+torch==0.4.1.post2
+torchvision==0.2.1
+opencv-python==3.4.2.17
+scikit-image==0.14.2
+scipy==1.1.0
\ No newline at end of file
diff --git a/test.py b/test.py
new file mode 100755
index 0000000..2b916f5
--- /dev/null
+++ b/test.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+import sys
+import os
+import time
+import argparse
+
+import torch
+import torch.nn as nn
+import torch.backends.cudnn as cudnn
+from torch.autograd import Variable
+
+from PIL import Image
+
+import cv2
+from skimage import io
+import numpy as np
+import craft_utils
+import imgproc
+import file_utils
+import json
+import zipfile
+
+from craft import CRAFT
+
+def str2bool(v):
+    return v.lower() in ("yes", "y", "true", "t", "1")
+
+parser = argparse.ArgumentParser(description='CRAFT Text Detection')
+parser.add_argument('--trained_model', default='weights/craft_mlt_25k.pth', type=str, help='pretrained model')
+parser.add_argument('--text_threshold', default=0.7, type=float, help='text confidence threshold')
+parser.add_argument('--low_text', default=0.4, type=float, help='text low-bound score')
+parser.add_argument('--link_threshold', default=0.4, type=float, help='link confidence threshold')
+parser.add_argument('--cuda', default=True, type=str2bool, help='Use cuda for inference')
+parser.add_argument('--canvas_size', default=1280, type=int, help='image size for inference')
+parser.add_argument('--mag_ratio', default=1.5, type=float, help='image magnification ratio')
+parser.add_argument('--show_time', default=False, action='store_true', help='show processing time')
+parser.add_argument('--test_folder', default='/data/', type=str, help='folder path to input images')
+
+args = parser.parse_args()
+
+
+""" For test images in a folder """
+image_list, _, _ = file_utils.get_files(args.test_folder)
+
+result_folder = './result/'
+if not os.path.isdir(result_folder):
+    os.mkdir(result_folder)
+
+def test_net(net, image, text_threshold, link_threshold, low_text, cuda):
+    t0 = time.time()
+
+    # resize
+    img_resized, target_ratio, size_heatmap = imgproc.resize_aspect_ratio(image, args.canvas_size, interpolation=cv2.INTER_LINEAR, mag_ratio=args.mag_ratio)
+    ratio_h = ratio_w = 1 / target_ratio
+
+    # preprocessing
+    x = imgproc.normalizeMeanVariance(img_resized)
+    x = torch.from_numpy(x).permute(2, 0, 1)    # [h, w, c] to [c, h, w]
+    x = Variable(x.unsqueeze(0))                # [c, h, w] to [b, c, h, w]
+    if cuda:
+        x = x.cuda()
+
+    # forward pass
+    y, _ = net(x)
+
+    # make score and link map
+    score_text = y[0,:,:,0].cpu().data.numpy()
+    score_link = y[0,:,:,1].cpu().data.numpy()
+
+    t0 = time.time() - t0
+    t1 = time.time()
+
+    # Post-processing
+    boxes = craft_utils.getDetBoxes(score_text, score_link, text_threshold, link_threshold, low_text)
+    boxes = craft_utils.adjustResultCoordinates(boxes, ratio_w, ratio_h)
+
+    t1 = time.time() - t1
+
+    # render results (optional)
+    render_img = score_text.copy()
+    render_img = np.hstack((render_img, score_link))
+    ret_score_text = imgproc.cvt2HeatmapImg(render_img)
+
+    if args.show_time : print("\ninfer/postproc time : {:.3f}/{:.3f}".format(t0, t1))
+
+    return boxes, ret_score_text
+
+
+
+if __name__ == '__main__':
+    # load net
+    net = CRAFT()     # initialize
+
+    if args.cuda:
+        net = net.cuda()
+        net = torch.nn.DataParallel(net)
+        cudnn.benchmark = False
+
+    print('Loading weights from checkpoint (' + args.trained_model + ')')
+    net.load_state_dict(torch.load(args.trained_model))
+    net.eval()
+
+    t = time.time()
+
+    # load data
+    for k, image_path in enumerate(image_list):
+        print("Test image {:d}/{:d}: {:s}".format(k+1, len(image_list), image_path), end='\r')
+        image = imgproc.loadImage(image_path)
+
+        bboxes, score_text = test_net(net, image, args.text_threshold, args.link_threshold, args.low_text, args.cuda)
+
+        # save score text
+        filename, file_ext = os.path.splitext(os.path.basename(image_path))
+        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
+        cv2.imwrite(mask_file, score_text)
+
+        file_utils.saveResult(image_path, image[:,:,::-1], bboxes, dirname=result_folder)
+
+    print("elapsed time : {}s".format(time.time() - t))
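For reference, here is a minimal single-image inference sketch assembled from the modules added in this diff (`craft.CRAFT`, `imgproc`, `craft_utils`), mirroring what `test.py` does per image. The weight path matches the `--trained_model` default; the input filename `sample.jpg`, CPU-only execution, and the `module.` prefix stripping (needed only if the checkpoint was saved from a `DataParallel` wrapper, as in `test.py`) are assumptions.

```python
# Minimal CPU inference sketch built from the modules in this diff; paths are assumptions.
import cv2
import torch

import craft_utils
import imgproc
from craft import CRAFT

net = CRAFT()  # pretrained=False is fine here since a full checkpoint is loaded next
state = torch.load('weights/craft_mlt_25k.pth', map_location='cpu')
state = {k.replace('module.', '', 1): v for k, v in state.items()}  # drop DataParallel prefix if present
net.load_state_dict(state)
net.eval()

image = imgproc.loadImage('sample.jpg')  # assumed input image, RGB order
img_resized, ratio, _ = imgproc.resize_aspect_ratio(
    image, 1280, interpolation=cv2.INTER_LINEAR, mag_ratio=1.5)
x = imgproc.normalizeMeanVariance(img_resized)
x = torch.from_numpy(x).permute(2, 0, 1).unsqueeze(0)  # [h, w, c] -> [1, c, h, w]

with torch.no_grad():
    y, _ = net(x)

score_text = y[0, :, :, 0].numpy()  # character region score map
score_link = y[0, :, :, 1].numpy()  # affinity (link) score map
boxes = craft_utils.getDetBoxes(score_text, score_link, 0.7, 0.4, 0.4)
boxes = craft_utils.adjustResultCoordinates(boxes, 1 / ratio, 1 / ratio)
print('{} text boxes detected'.format(len(boxes)))
```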