
Edge window completed; CPU and multi-GPU modes added.

youyuge34 committed Jan 10, 2019
1 parent f33753b commit 6c9b53eb07017140ee41b5136515544b222a773b
Showing with 78 additions and 11 deletions.
  1. +9 −3 README.md
  2. +1 −0 config.yml.example
  3. +41 −3 demo_patch.py
  4. BIN files/cut3.gif
  5. +9 −3 main.py
  6. +18 −2 src/models.py
@@ -10,15 +10,18 @@ English | [中文版介绍](#jump_zh)
<img src="files/demo_house.jpg" width="425" height="400">
</p>

#### Demo using
#### Demo using only the input image
<p align="center">
<img src="files/cut2.gif" width="425" height="425">
</p>

#### Demo using input and edge to fine-tune
<p align="center">
<img src="files/cut3.gif" width="924" height="488">
</p>

Validation samples of a well-trained model (weights files are provided below):


<p align="center">
<img src="files/2069500.png" width="640" height="380">
</p>
@@ -32,10 +35,13 @@ Compared with the original work, this project has such improvements:
- Auto-save and auto-load latest weights files
- Add a fast training phase combined with origin phase 2 and 3
- Bugs fixed (most of them have been merged into the original work)
- Add config of print frequency
- Add configs in `config.yml`
- PRINT_FREQUENCY
- DEVICE: CPU or GPU
- ... ...

**You can do amazing Anime inpainting conveniently here.
A detailed training tutorial is given below.**

## Prerequisites
@@ -4,6 +4,7 @@ MASK: 3 # 1: random block, 2: half, 3: external, 4: (external, rando
EDGE: 1 # 1: canny, 2: external
NMS: 1 # 0: no non-max-suppression, 1: applies non-max-suppression on the external edges by multiplying by Canny
SEED: 10 # random seed
DEVICE: 1 # 0: CPU, 1: GPU
GPU: [0] # list of gpu ids
DEBUG: 0 # turns on debugging mode
VERBOSE: 0 # turns on verbose mode in the output console
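
For illustration, a CPU-only run and a two-GPU run would use settings like these (the values are examples, not defaults shipped with this commit):

DEVICE: 0           # force CPU; the GPU list below is then ignored
# or
DEVICE: 1           # use CUDA when available
GPU: [0, 1]         # two GPU ids; the models are wrapped in nn.DataParallel (see src/models.py below)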
@@ -7,16 +7,21 @@
USAGE:
python demo_patch.py --path <your weights directory path>
--edge (optional, opens an extra edge window)
README FIRST:
Two windows will show up, one for input and one for output.
If the `--edge` arg is given, an additional edge window will show up.
[Important] Switch your input method to English first.
At first, in the input window, paint the missing part black using the
mouse left button. Then press 'n' to path the image (once or a few times)
mouse left button. Then press 'n' to patch the image (once or a few times)
For any finer touch-ups, you can press any of the keys below and draw lines on
the areas you want, then press 'n' again to update the output.
In the input window, the left mouse button draws the black mask that marks the defect.
In the edge window, the left mouse button draws edges while the right button erases them.
Press '[' and ']' to change the brush thickness, as in Photoshop.
Finally, press 's' to save the output.
Key '[' - To make the brush thickness smaller
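
For example, assuming the weights live in a local ./checkpoints directory (the path is only a placeholder), the fine-tuning demo with the edge window is started as:

python demo_patch.py --path ./checkpoints --edge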
@@ -57,6 +62,8 @@

radius = 3 # brush radius
drawing = False
drawing_edge_l = False
drawing_edge_r = False
value = DRAW_MASK
THICKNESS = -1 # -1 draws a filled (solid) brush circle

@@ -94,10 +101,34 @@ def onmouse_edge(event, x, y, flags, param):
"""
# to assign to variables defined outside this function,
# they must be declared as global
global img, img2, drawing, value, mask, ix, iy, rect_over
global img, img2, drawing_edge_l, drawing_edge_r, value, mask, ix, iy, edge
# print(x, y)

# draw touchup curves
if event == cv.EVENT_LBUTTONDOWN:
drawing_edge_l = True
# cv.circle(edge, (x, y), 1, WHITE, THICKNESS, lineType=cv.LINE_AA)
edge[y, x] = 255

elif drawing_edge_l is True and event == cv.EVENT_MOUSEMOVE:
# cv.circle(edge, (x, y), 1, WHITE, THICKNESS, lineType=4)
edge[y, x] = 255

elif drawing_edge_l is True and event == cv.EVENT_LBUTTONUP:
drawing_edge_l = False
# cv.circle(edge, (x, y), 1, WHITE, THICKNESS, lineType=cv.LINE_AA)
edge[y, x] = 255

elif event == cv.EVENT_RBUTTONDOWN:
drawing_edge_r = True
cv.circle(edge, (x, y), radius, BLACK, THICKNESS, lineType=cv.LINE_AA)

elif drawing_edge_r is True and event == cv.EVENT_MOUSEMOVE:
cv.circle(edge, (x, y), radius, BLACK, THICKNESS, lineType=cv.LINE_AA)

elif drawing_edge_r is True and event == cv.EVENT_RBUTTONUP:
drawing_edge_r = False
cv.circle(edge, (x, y), radius, BLACK, THICKNESS, lineType=cv.LINE_AA)
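
For context, a callback like this is registered through OpenCV's standard mouse-callback API; a minimal sketch (the window name 'edge' is an assumption, not taken from this commit):

cv.namedWindow('edge')
cv.setMouseCallback('edge', onmouse_edge)  # route mouse events in that window to the handler above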


def check_load(args):
@@ -167,12 +198,17 @@ def model_process(img, mask, edge=None):

# image: absolute image path
image = fileopenbox(msg='Select an image', title='Select', filetypes=[['*.png', '*.jpg', '*.jpeg', 'Image Files']])
if not image:
if image is None:
print('\nPlease select an image.')
exit()
else:
print('Image selected: ' + image)

img = cv.imread(image)

# resize the photo if it is too small
if max(img.shape[0], img.shape[1]) < 256:
img = cv.resize(img, dsize=(0, 0), fx=1.5, fy=1.5, interpolation=cv.INTER_CUBIC)
img2 = img.copy() # a copy of original image
mask = np.zeros(img.shape[:2], dtype=np.uint8) # mask initialized to PR_BG
output = np.zeros(img.shape, np.uint8) # output image to be shown
@@ -217,6 +253,8 @@ def model_process(img, mask, edge=None):
output = np.zeros(img.shape, np.uint8) # output image to be shown
if args.edge:
edge = np.zeros(img.shape, np.uint8)
drawing_edge_r = False
drawing_edge_l = False

elif k == ord('n'): # begin to patch the image
print("\nPatching using input image...")
BIN +357 KB files/cut3.gif
12 main.py
@@ -22,9 +22,15 @@ def main(mode=None, config=None):
else:
config = load_config(mode)

# init cuda environment
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
config.DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# init environment
if (config.DEVICE == 1 or config.DEVICE is None) and torch.cuda.is_available():
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)
config.DEVICE = torch.device("cuda")
torch.backends.cudnn.benchmark = True # cudnn auto-tuner
else:
config.DEVICE = torch.device("cpu")
# print(torch.cuda.is_available())
print('DEVICE is:', config.DEVICE)

# disable cv2 internal multithreading (prevents deadlocks with the pytorch dataloader)
cv2.setNumThreads(0)
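
A quick way to confirm which branch of this check will be taken on a given machine, using only standard PyTorch calls (not part of this commit):

import torch
print(torch.cuda.is_available(), torch.cuda.device_count())  # e.g. "True 2" on a two-GPU box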
@@ -23,14 +23,20 @@ def load(self):
dis_path = get_model_list(self.config.PATH, self.name, 'dis')
if gen_path is not None:
print('Loading {} generator weights file: {}...'.format(self.name, gen_path))
data = torch.load(gen_path)
if self.config.DEVICE == torch.device('cuda'): # gpu
data = torch.load(gen_path)
else: # cpu
data = torch.load(gen_path, map_location=lambda storage, loc: storage)
self.generator.load_state_dict(data['generator'])
self.iteration = data['iteration']

# load discriminator only when training
if self.config.MODE == 1 and dis_path is not None:
print('Loading {} discriminator weights file: {}...'.format(self.name, dis_path))
data = torch.load(dis_path)
if self.config.DEVICE == torch.device('cuda'):
data = torch.load(dis_path)
else:
data = torch.load(dis_path, map_location=lambda storage, loc: storage)
self.discriminator.load_state_dict(data['discriminator'])
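
An equivalent branch-free alternative (a sketch, not what this commit does) is to pass the target device straight to torch.load, since map_location also accepts a torch.device:

data = torch.load(gen_path, map_location=self.config.DEVICE)  # remaps CUDA tensors to CPU when DEVICE is cpu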

def save(self):
@@ -54,6 +60,11 @@ def __init__(self, config):
generator = EdgeGenerator(use_spectral_norm=True)
discriminator = Discriminator(in_channels=2, use_sigmoid=config.GAN_LOSS != 'hinge')

# multi-gpu
if len(config.GPU) > 1:
generator = nn.DataParallel(generator, config.GPU)
discriminator = nn.DataParallel(discriminator, config.GPU)

l1_loss = nn.L1Loss()
adversarial_loss = AdversarialLoss(type=config.GAN_LOSS)

@@ -144,6 +155,11 @@ def __init__(self, config):
generator = InpaintGenerator()
discriminator = Discriminator(in_channels=3, use_sigmoid=config.GAN_LOSS != 'hinge')

# multi-gpu
if len(config.GPU) > 1:
generator = nn.DataParallel(generator, config.GPU)
discriminator = nn.DataParallel(discriminator, config.GPU)

l1_loss = nn.L1Loss()
perceptual_loss = PerceptualLoss()
style_loss = StyleLoss()
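
For reference, nn.DataParallel replicates the wrapped module on the listed GPUs, splits each input batch along dimension 0 across them, and gathers the outputs back on the first device. A small self-contained sketch (module and shapes are made up for illustration):

import torch
import torch.nn as nn

model = nn.Linear(8, 4)
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    model = nn.DataParallel(model, device_ids=[0, 1])  # scatter each batch across GPUs 0 and 1
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
y = model(torch.randn(16, 8).to(device))  # shape (16, 4); outputs gathered on the first device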
