From 972dcc875563f94628478a09b60396d4adc1ad76 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Sun, 18 Apr 2021 12:53:43 +0800 Subject: [PATCH 1/9] BVDNet --- cores/options.py | 18 +- make_datasets/make_pix2pix_dataset.py | 2 +- models/BVDNet.py | 144 ++++++ models/model_util.py | 10 + train/add/train.py | 2 +- train/clean/train.py | 627 +++++++++++++------------- train/clean/train_old.py | 310 +++++++++++++ util/data.py | 204 ++++++--- util/image_processing.py | 30 +- util/util.py | 5 + 10 files changed, 964 insertions(+), 388 deletions(-) create mode 100644 models/BVDNet.py create mode 100644 models/model_util.py create mode 100644 train/clean/train_old.py diff --git a/cores/options.py b/cores/options.py index 7e8c165..6c972b5 100644 --- a/cores/options.py +++ b/cores/options.py @@ -11,7 +11,7 @@ def __init__(self): def initialize(self): #base - self.parser.add_argument('--use_gpu', type=int,default=0, help='if -1, use cpu') + self.parser.add_argument('--use_gpu', type=str,default='0', help='if -1, use cpu') self.parser.add_argument('--media_path', type=str, default='./imgs/ruoruo.jpg',help='your videos or images path') self.parser.add_argument('-ss', '--start_time', type=str, default='00:00:00',help='start position of video, default is the beginning of video') self.parser.add_argument('-t', '--last_time', type=str, default='00:00:00',help='duration of the video, default is the entire video') @@ -58,13 +58,15 @@ def getparse(self, test_flag = False): model_name = os.path.basename(self.opt.model_path) self.opt.temp_dir = os.path.join(self.opt.temp_dir, 'DeepMosaics_temp') - - os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.use_gpu) - import torch - if torch.cuda.is_available() and self.opt.use_gpu > -1: - pass - else: - self.opt.use_gpu = -1 + + + if self.opt.use_gpu != '-1': + os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.use_gpu) + import torch + if not torch.cuda.is_available(): + self.opt.use_gpu = '-1' + # else: + # self.opt.use_gpu = '-1' if test_flag: if not os.path.exists(self.opt.media_path): diff --git a/make_datasets/make_pix2pix_dataset.py b/make_datasets/make_pix2pix_dataset.py index c9ccb87..4256f6c 100644 --- a/make_datasets/make_pix2pix_dataset.py +++ b/make_datasets/make_pix2pix_dataset.py @@ -87,7 +87,7 @@ mask = mask_drawn if 'irregular' in opt.mod: mask_irr = impro.imread(irrpaths[random.randint(0,12000-1)],'gray') - mask_irr = data.random_transform_single(mask_irr, (img.shape[0],img.shape[1])) + mask_irr = data.random_transform_single_mask(mask_irr, (img.shape[0],img.shape[1])) mask = mask_irr if 'network' in opt.mod: mask_net = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0] diff --git a/models/BVDNet.py b/models/BVDNet.py new file mode 100644 index 0000000..eb26f04 --- /dev/null +++ b/models/BVDNet.py @@ -0,0 +1,144 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from .pix2pixHD_model import * + + +class Encoder2d(nn.Module): + def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm2d): + super(Encoder2d, self).__init__() + activation = nn.ReLU(True) + + model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] + ### downsample + for i in range(n_downsampling): + mult = 2**i + model += [nn.ReflectionPad2d(1),nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0), + norm_layer(ngf * mult * 2), activation] + + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + +class Encoder3d(nn.Module): + def 
__init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm3d): + super(Encoder3d, self).__init__() + activation = nn.ReLU(True) + + model = [nn.Conv3d(input_nc, ngf, kernel_size=3, padding=1), norm_layer(ngf), activation] + ### downsample + for i in range(n_downsampling): + mult = 2**i + model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), + norm_layer(ngf * mult * 2), activation] + + self.model = nn.Sequential(*model) + + def forward(self, input): + return self.model(input) + +class BVDNet(nn.Module): + def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3): + super(BVDNet, self).__init__() + + ngf = 64 + padding_type = 'reflect' + norm_layer = nn.BatchNorm2d + self.N = N + + # encoder + self.encoder3d = Encoder3d(input_nc,64,n_downsampling) + self.encoder2d = Encoder2d(input_nc,64,n_downsampling) + + ### resnet blocks + self.blocks = [] + mult = 2**n_downsampling + for i in range(n_blocks): + self.blocks += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=nn.ReLU(True), norm_layer=norm_layer)] + self.blocks = nn.Sequential(*self.blocks) + + ### decoder + self.decoder = [] + for i in range(n_downsampling): + mult = 2**(n_downsampling - i) + # self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), + # norm_layer(int(ngf * mult / 2)), nn.ReLU(True)] + self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'), + nn.ReflectionPad2d(1), + nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), + norm_layer(int(ngf * mult / 2)), + nn.ReLU(True)] + self.decoder += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] + self.decoder = nn.Sequential(*self.decoder) + self.limiter = nn.Tanh() + + def forward(self, stream, last): + this_shortcut = stream[:,:,self.N] + stream = self.encoder3d(stream) + stream = stream.reshape(stream.size(0),stream.size(1),stream.size(3),stream.size(4)) + # print(stream.shape) + last = self.encoder2d(last) + x = stream + last + x = self.blocks(x) + x = self.decoder(x) + x = x+this_shortcut + x = self.limiter(x) + #print(x.shape) + + # print(stream.shape,last.shape) + return x + +class VGGLoss(nn.Module): + def __init__(self, gpu_ids): + super(VGGLoss, self).__init__() + + self.vgg = Vgg19() + if gpu_ids != '-1' and len(gpu_ids) == 1: + self.vgg.cuda() + elif gpu_ids != '-1' and len(gpu_ids) > 1: + self.vgg = nn.DataParallel(self.vgg) + self.vgg.cuda() + + self.criterion = nn.L1Loss() + self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] + + def forward(self, x, y): + x_vgg, y_vgg = self.vgg(x), self.vgg(y) + loss = 0 + for i in range(len(x_vgg)): + loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) + return loss + +from torchvision import models +class Vgg19(torch.nn.Module): + def __init__(self, requires_grad=False): + super(Vgg19, self).__init__() + vgg_pretrained_features = models.vgg19(pretrained=True).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + for x in range(2): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(2, 7): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(7, 12): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(12, 21): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in 
range(21, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h_relu1 = self.slice1(X) + h_relu2 = self.slice2(h_relu1) + h_relu3 = self.slice3(h_relu2) + h_relu4 = self.slice4(h_relu3) + h_relu5 = self.slice5(h_relu4) + out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] + return out diff --git a/models/model_util.py b/models/model_util.py new file mode 100644 index 0000000..8f1789f --- /dev/null +++ b/models/model_util.py @@ -0,0 +1,10 @@ +import torch +import torch.nn as nn + +def save(net,path,gpu_id): + if isinstance(net, nn.DataParallel): + torch.save(net.module.cpu().state_dict(),path) + else: + torch.save(net.cpu().state_dict(),path) + if gpu_id != '-1': + net.cuda() \ No newline at end of file diff --git a/train/add/train.py b/train/add/train.py index d64063e..8ccc6ee 100644 --- a/train/add/train.py +++ b/train/add/train.py @@ -68,7 +68,7 @@ def loadimage(imagepaths,maskpaths,opt,test_flag = False): for i in range(len(imagepaths)): img = impro.resize(impro.imread(imagepaths[i]),opt.loadsize) mask = impro.resize(impro.imread(maskpaths[i],mod = 'gray'),opt.loadsize) - img,mask = data.random_transform_image(img, mask, opt.finesize, test_flag) + img,mask = data.random_transform_pair_image(img, mask, opt.finesize, test_flag) images[i] = (img.transpose((2, 0, 1))/255.0) masks[i] = (mask.reshape(1,1,opt.finesize,opt.finesize)/255.0) images = Totensor(images,opt.use_gpu) diff --git a/train/clean/train.py b/train/clean/train.py index 70efb41..49c886f 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -1,310 +1,317 @@ -import os -import sys -sys.path.append("..") -sys.path.append("../..") -from cores import Options -opt = Options() - -import numpy as np -import cv2 -import random -import torch -import torch.nn as nn -import time -from multiprocessing import Process, Queue - -from util import mosaic,util,ffmpeg,filt,data -from util import image_processing as impro -from models import pix2pix_model,pix2pixHD_model,video_model,unet_model,loadmodel,videoHD_model -import matplotlib -matplotlib.use('Agg') -from matplotlib import pyplot as plt -import torch.backends.cudnn as cudnn - -''' ---------------------------Get options-------------------------- -''' -opt.parser.add_argument('--N',type=int,default=25, help='') -opt.parser.add_argument('--lr',type=float,default=0.0002, help='') -opt.parser.add_argument('--beta1',type=float,default=0.5, help='') -opt.parser.add_argument('--gan', action='store_true', help='if specified, use gan') -opt.parser.add_argument('--l2', action='store_true', help='if specified, use L2 loss') -opt.parser.add_argument('--hd', action='store_true', help='if specified, use HD model') -opt.parser.add_argument('--lambda_L1',type=float,default=100, help='') -opt.parser.add_argument('--lambda_gan',type=float,default=1, help='') -opt.parser.add_argument('--finesize',type=int,default=256, help='') -opt.parser.add_argument('--loadsize',type=int,default=286, help='') -opt.parser.add_argument('--batchsize',type=int,default=1, help='') -opt.parser.add_argument('--norm',type=str,default='instance', help='') -opt.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') -opt.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') -opt.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') 
-opt.parser.add_argument('--image_pool',type=int,default=8, help='number of image load pool') -opt.parser.add_argument('--load_process',type=int,default=4, help='number of process for loading data') - -opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') -opt.parser.add_argument('--maxiter',type=int,default=10000000, help='') -opt.parser.add_argument('--savefreq',type=int,default=10000, help='') -opt.parser.add_argument('--startiter',type=int,default=0, help='') -opt.parser.add_argument('--continue_train', action='store_true', help='') -opt.parser.add_argument('--savename',type=str,default='face', help='') - - -''' ---------------------------Init-------------------------- -''' -opt = opt.getparse() -dir_checkpoint = os.path.join('checkpoints/',opt.savename) -util.makedirs(dir_checkpoint) -util.writelog(os.path.join(dir_checkpoint,'loss.txt'), - str(time.asctime(time.localtime(time.time())))+'\n'+util.opt2str(opt)) -cudnn.benchmark = True - -N = opt.N -loss_sum = [0.,0.,0.,0.,0.,0] -loss_plot = [[],[],[],[]] -item_plot = [] - -# list video dir -videonames = os.listdir(opt.dataset) -videonames.sort() -lengths = [];tmp = [] -print('Check dataset...') -for video in videonames: - if video != 'opt.txt': - video_images = os.listdir(os.path.join(opt.dataset,video,'origin_image')) - lengths.append(len(video_images)) - tmp.append(video) -videonames = tmp -video_num = len(videonames) - -#--------------------------Init network-------------------------- -print('Init network...') -if opt.hd: - netG = videoHD_model.MosaicNet(3*N+1, 3, norm=opt.norm) -else: - netG = video_model.MosaicNet(3*N+1, 3, norm=opt.norm) -netG.cuda() -loadmodel.show_paramsnumber(netG,'netG') - -if opt.gan: - if opt.hd: - netD = pix2pixHD_model.define_D(6, 64, opt.n_layers_D, norm = opt.norm, use_sigmoid=False, num_D=opt.num_D,getIntermFeat=True) - else: - netD = pix2pix_model.define_D(3*2, 64, 'basic', norm = opt.norm) - netD.cuda() - netD.train() - -#--------------------------continue train-------------------------- -if opt.continue_train: - if not os.path.isfile(os.path.join(dir_checkpoint,'last_G.pth')): - opt.continue_train = False - print('can not load last_G, training on init weight.') -if opt.continue_train: - netG.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_G.pth'))) - if opt.gan: - netD.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_D.pth'))) - f = open(os.path.join(dir_checkpoint,'iter'),'r') - opt.startiter = int(f.read()) - f.close() - -#--------------------------optimizer & loss-------------------------- -optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) -criterion_L1 = nn.L1Loss() -criterion_L2 = nn.MSELoss() -if opt.gan: - optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) - if opt.hd: - criterionGAN = pix2pixHD_model.GANLoss(tensor=torch.cuda.FloatTensor).cuda() - criterionFeat = pix2pixHD_model.GAN_Feat_loss(opt) - criterionVGG = pix2pixHD_model.VGGLoss([opt.use_gpu]) - else: - criterionGAN = pix2pix_model.GANLoss(gan_mode='lsgan').cuda() - -''' ---------------------------preload data & data pool-------------------------- -''' -print('Preloading data, please wait...') -def preload(pool): - cnt = 0 - input_imgs = torch.rand(opt.batchsize,N*3+1,opt.finesize,opt.finesize) - ground_trues = torch.rand(opt.batchsize,3,opt.finesize,opt.finesize) - while 1: - try: - for i in range(opt.batchsize): - video_index = random.randint(0,video_num-1) - videoname = videonames[video_index] - img_index = 
random.randint(int(N/2)+1,lengths[video_index]- int(N/2)-1) - input_imgs[i],ground_trues[i] = data.load_train_video(videoname,img_index,opt) - cnt += 1 - pool.put([input_imgs,ground_trues]) - except Exception as e: - print("Error:",videoname,e) -pool = Queue(opt.image_pool) -for i in range(opt.load_process): - p = Process(target=preload,args=(pool,)) - p.daemon = True - p.start() - -''' ---------------------------train-------------------------- -''' -util.copyfile('./train.py', os.path.join(dir_checkpoint,'train.py')) -util.copyfile('../../models/videoHD_model.py', os.path.join(dir_checkpoint,'model.py')) -netG.train() -time_start=time.time() -print("Begin training...") -for iter in range(opt.startiter+1,opt.maxiter): - - inputdata,target = pool.get() - inputdata,target = inputdata.cuda(),target.cuda() - - if opt.gan: - # compute fake images: G(A) - pred = netG(inputdata) - real_A = inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:] - - # --------------------update D-------------------- - pix2pix_model.set_requires_grad(netD,True) - optimizer_D.zero_grad() - # Fake - fake_AB = torch.cat((real_A, pred), 1) - pred_fake = netD(fake_AB.detach()) - loss_D_fake = criterionGAN(pred_fake, False) - # Real - real_AB = torch.cat((real_A, target), 1) - pred_real = netD(real_AB) - loss_D_real = criterionGAN(pred_real, True) - # combine loss and calculate gradients - loss_D = (loss_D_fake + loss_D_real) * 0.5 - loss_sum[4] += loss_D_fake.item() - loss_sum[5] += loss_D_real.item() - # udpate D's weights - loss_D.backward() - optimizer_D.step() - - # --------------------update G-------------------- - pix2pix_model.set_requires_grad(netD,False) - optimizer_G.zero_grad() - - # First, G(A) should fake the discriminator - fake_AB = torch.cat((real_A, pred), 1) - pred_fake = netD(fake_AB) - loss_G_GAN = criterionGAN(pred_fake, True)*opt.lambda_gan - - # combine loss and calculate gradients - if opt.l2: - loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 - else: - loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 - - if opt.hd: - real_AB = torch.cat((real_A, target), 1) - pred_real = netD(real_AB) - loss_G_GAN_Feat = criterionFeat(pred_fake,pred_real) - loss_VGG = criterionVGG(pred, target) * opt.lambda_feat - loss_G = loss_G_GAN + loss_G_L1 + loss_G_GAN_Feat + loss_VGG - else: - loss_G = loss_G_GAN + loss_G_L1 - loss_sum[0] += loss_G_L1.item() - loss_sum[1] += loss_G_GAN.item() - loss_sum[2] += loss_G_GAN_Feat.item() - loss_sum[3] += loss_VGG.item() - - # udpate G's weights - loss_G.backward() - optimizer_G.step() - - else: - pred = netG(inputdata) - if opt.l2: - loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 - else: - loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 - loss_sum[0] += loss_G_L1.item() - - optimizer_G.zero_grad() - loss_G_L1.backward() - optimizer_G.step() - - # save train result - if (iter+1)%1000 == 0: - try: - data.showresult(inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:], - target, pred, os.path.join(dir_checkpoint,'result_train.jpg')) - except Exception as e: - print(e) - - # plot - if (iter+1)%1000 == 0: - time_end = time.time() - #if opt.gan: - savestr ='iter:{0:d} L1_loss:{1:.3f} GAN_loss:{2:.3f} Feat:{3:.3f} VGG:{4:.3f} time:{5:.2f}'.format( - iter+1,loss_sum[0]/1000,loss_sum[1]/1000,loss_sum[2]/1000,loss_sum[3]/1000,(time_end-time_start)/1000) - util.writelog(os.path.join(dir_checkpoint,'loss.txt'), savestr,True) - if (iter+1)/1000 >= 10: - for i in range(4):loss_plot[i].append(loss_sum[i]/1000) - 
item_plot.append(iter+1) - try: - labels = ['L1_loss','GAN_loss','GAN_Feat_loss','VGG_loss'] - for i in range(4):plt.plot(item_plot,loss_plot[i],label=labels[i]) - plt.xlabel('iter') - plt.legend(loc=1) - plt.savefig(os.path.join(dir_checkpoint,'loss.jpg')) - plt.close() - except Exception as e: - print("error:",e) - - loss_sum = [0.,0.,0.,0.,0.,0.] - time_start=time.time() - - # save network - if (iter+1)%(opt.savefreq//10) == 0: - torch.save(netG.cpu().state_dict(),os.path.join(dir_checkpoint,'last_G.pth')) - if opt.gan: - torch.save(netD.cpu().state_dict(),os.path.join(dir_checkpoint,'last_D.pth')) - if opt.use_gpu !=-1 : - netG.cuda() - if opt.gan: - netD.cuda() - f = open(os.path.join(dir_checkpoint,'iter'),'w+') - f.write(str(iter+1)) - f.close() - - if (iter+1)%opt.savefreq == 0: - os.rename(os.path.join(dir_checkpoint,'last_G.pth'),os.path.join(dir_checkpoint,str(iter+1)+'G.pth')) - if opt.gan: - os.rename(os.path.join(dir_checkpoint,'last_D.pth'),os.path.join(dir_checkpoint,str(iter+1)+'D.pth')) - print('network saved.') - - #test - if (iter+1)%opt.savefreq == 0: - if os.path.isdir('./test'): - netG.eval() - - test_names = os.listdir('./test') - test_names.sort() - result = np.zeros((opt.finesize*2,opt.finesize*len(test_names),3), dtype='uint8') - - for cnt,test_name in enumerate(test_names,0): - img_names = os.listdir(os.path.join('./test',test_name,'image')) - img_names.sort() - inputdata = np.zeros((opt.finesize,opt.finesize,3*N+1), dtype='uint8') - for i in range(0,N): - img = impro.imread(os.path.join('./test',test_name,'image',img_names[i])) - img = impro.resize(img,opt.finesize) - inputdata[:,:,i*3:(i+1)*3] = img - - mask = impro.imread(os.path.join('./test',test_name,'mask.png'),'gray') - mask = impro.resize(mask,opt.finesize) - mask = impro.mask_threshold(mask,15,128) - inputdata[:,:,-1] = mask - result[0:opt.finesize,opt.finesize*cnt:opt.finesize*(cnt+1),:] = inputdata[:,:,int((N-1)/2)*3:(int((N-1)/2)+1)*3] - inputdata = data.im2tensor(inputdata,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False) - pred = netG(inputdata) - - pred = data.tensor2im(pred,rgb2bgr = False, is0_1 = False) - result[opt.finesize:opt.finesize*2,opt.finesize*cnt:opt.finesize*(cnt+1),:] = pred - - cv2.imwrite(os.path.join(dir_checkpoint,str(iter+1)+'_test.jpg'), result) - netG.train() \ No newline at end of file +import os +import sys +sys.path.append("..") +sys.path.append("../..") +from cores import Options +opt = Options() + +import numpy as np +import cv2 +import random +import torch +import torch.nn as nn +import time +from multiprocessing import Process, Queue + +from util import mosaic,util,ffmpeg,filt,data +from util import image_processing as impro +from models import pix2pix_model,pix2pixHD_model,video_model,unet_model,loadmodel,videoHD_model,BVDNet,model_util +import torch.backends.cudnn as cudnn +from tensorboardX import SummaryWriter + +''' +--------------------------Get options-------------------------- +''' +opt.parser.add_argument('--N',type=int,default=2, help='number of frames on each side of the center frame; the input clip contains T = 2N+1 frames') +opt.parser.add_argument('--S',type=int,default=3, help='temporal stride (in frames) between the sampled frames') +# opt.parser.add_argument('--T',type=int,default=7, help='T = 2N+1') +opt.parser.add_argument('--M',type=int,default=100, help='how many frames to read from each video') +opt.parser.add_argument('--lr',type=float,default=0.001, help='') +opt.parser.add_argument('--beta1',type=float,default=0.9, help='') +opt.parser.add_argument('--beta2',type=float,default=0.999, help='')
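+# Worked example with the defaults above: N=2 gives T = 2N+1 = 5 frames per clip, sampled S=3 frames apart, so one clip spans (T-1)*S+1 = 13 consecutive video frames.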
+opt.parser.add_argument('--finesize',type=int,default=256, help='') +opt.parser.add_argument('--loadsize',type=int,default=286, help='') +opt.parser.add_argument('--batchsize',type=int,default=1, help='') +opt.parser.add_argument('--lambda_VGG',type=float,default=0.1, help='') +opt.parser.add_argument('--load_thread',type=int,default=4, help='number of threads for loading data') + +opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') +opt.parser.add_argument('--n_epoch',type=int,default=200, help='') +opt.parser.add_argument('--save_freq',type=int,default=100000, help='') +opt.parser.add_argument('--continue_train', action='store_true', help='') +opt.parser.add_argument('--savename',type=str,default='face', help='') +opt.parser.add_argument('--showresult_freq',type=int,default=1000, help='') +opt.parser.add_argument('--showresult_num',type=int,default=4, help='') +opt.parser.add_argument('--psnr_freq',type=int,default=100, help='') + +class TrainVideoLoader(object): + """docstring for VideoLoader + 1.Init TrainVideoLoader as loader + 2.Get data by loader.ori_stream + 3.loader.next() + """ + def __init__(self, opt, video_dir, test_flag=False): + super(TrainVideoLoader, self).__init__() + self.opt = opt + self.test_flag = test_flag + self.video_dir = video_dir + self.t = 0 + self.n_iter = self.opt.M -self.opt.S*(self.opt.T+1) + self.transform_params = data.get_transform_params() + self.ori_load_pool = [] + self.mosaic_load_pool = [] + self.last_pred = None + feg_ori = impro.imread(os.path.join(video_dir,'origin_image','00001.jpg'),loadsize=self.opt.loadsize,rgb=True) + feg_mask = impro.imread(os.path.join(video_dir,'mask','00001.png'),mod='gray',loadsize=self.opt.loadsize) + self.mosaic_size,self.mod,self.rect_rat,self.feather = mosaic.get_random_parameter(feg_ori,feg_mask) + self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] + + #Init load pool + for i in range(self.opt.S*self.opt.T): + #print(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg')) + _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) + _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) + _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) + # _ori_img = data.random_transform_single_image(_ori_img, opt.finesize,self.transform_params,self.test_flag) + # _mosaic_img = data.random_transform_single_image(_mosaic_img, opt.finesize,self.transform_params,self.test_flag) + self.ori_load_pool.append(self.normalize(_ori_img)) + self.mosaic_load_pool.append(self.normalize(_mosaic_img)) + self.ori_load_pool = np.array(self.ori_load_pool) + self.mosaic_load_pool = np.array(self.mosaic_load_pool) + + #Init first stream + self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + # stream B,T,H,W,C -> B,C,T,H,W + self.ori_stream = self.ori_stream.reshape (1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + + #Init first previous frame + self.last_pred = self.ori_load_pool[self.opt.S*self.opt.N-1].copy() + # previous B,C,H,W + self.last_pred = 
self.last_pred.reshape(1,opt.finesize,opt.finesize,3).transpose((0,3,1,2)) + + def normalize(self,data): + return (data.astype(np.float32)/255.0-0.5)/0.5 + + def next(self): + if self.t != 0: + self.last_pred = None + self.ori_load_pool [:self.opt.S*self.opt.T-1] = self.ori_load_pool [1:self.opt.S*self.opt.T] + self.mosaic_load_pool[:self.opt.S*self.opt.T-1] = self.mosaic_load_pool[1:self.opt.S*self.opt.T] + #print(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg')) + _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) + _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) + _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) + # if np.random.random() < 0.01: + # print('1') + # cv2.imwrite(util.randomstr(10)+'.jpg', _ori_img) + + # _ori_img = data.random_transform_single_image(_ori_img, opt.finesize,self.transform_params,self.test_flag) + # _mosaic_img = data.random_transform_single_image(_mosaic_img, opt.finesize,self.transform_params,self.test_flag) + _ori_img,_mosaic_img = self.normalize(_ori_img),self.normalize(_mosaic_img) + self.ori_load_pool [self.opt.S*self.opt.T-1] = _ori_img + self.mosaic_load_pool[self.opt.S*self.opt.T-1] = _mosaic_img + + self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + + if np.random.random() < 0.01: + # print(self.ori_stream[0,0].shape) + print('1') + cv2.imwrite(util.randomstr(10)+'.jpg', self.ori_stream[0]) + + # stream B,T,H,W,C -> B,C,T,H,W + self.ori_stream = self.ori_stream.reshape (1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + + self.t += 1 + +class DataLoader(object): + """DataLoader""" + def __init__(self, opt, videolist, test_flag=False): + super(DataLoader, self).__init__() + self.videolist = [] + self.opt = opt + self.test_flag = test_flag + for i in range(self.opt.n_epoch): + self.videolist += videolist + random.shuffle(self.videolist) + self.each_video_n_iter = self.opt.M -self.opt.S*(self.opt.T+1) + self.n_iter = len(self.videolist)//self.opt.load_thread//self.opt.batchsize*self.each_video_n_iter*self.opt.load_thread + self.queue = Queue(self.opt.load_thread) + self.ori_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W + self.mosaic_stream = self.ori_stream.copy() + self.last_pred = np.zeros((self.opt.batchsize,3,self.opt.finesize,self.opt.finesize),dtype=np.float32) + + def load(self,videolist): + for load_video_iter in range(len(videolist)//self.opt.batchsize): + iter_videolist = videolist[load_video_iter*self.opt.batchsize:(load_video_iter+1)*self.opt.batchsize] + videoloaders = [TrainVideoLoader(self.opt,os.path.join(self.opt.dataset,iter_videolist[i]),self.test_flag) for i in range(self.opt.batchsize)] + for each_video_iter in range(self.each_video_n_iter): + for i in range(self.opt.batchsize): + self.ori_stream[i] = videoloaders[i].ori_stream + self.mosaic_stream[i] = videoloaders[i].mosaic_stream + if each_video_iter == 0: + self.last_pred[i] = 
videoloaders[i].last_pred + videoloaders[i].next() + if each_video_iter == 0: + self.queue.put([self.ori_stream,self.mosaic_stream,self.last_pred]) + else: + self.queue.put([self.ori_stream,self.mosaic_stream,None]) + + def load_init(self): + ptvn = len(self.videolist)//self.opt.load_thread #per_thread_video_num + for i in range(self.opt.load_thread): + p = Process(target=self.load,args=(self.videolist[i*ptvn:(i+1)*ptvn],)) + p.daemon = True + p.start() + + def get_data(self): + return self.queue.get() + +''' +--------------------------Init-------------------------- +''' +opt = opt.getparse() +opt.T = 2*opt.N+1 +if opt.showresult_num >opt.batchsize: + opt.showresult_num = opt.batchsize +dir_checkpoint = os.path.join('checkpoints',opt.savename) +util.makedirs(dir_checkpoint) +# start tensorboard +localtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) +tensorboard_savedir = os.path.join('checkpoints/tensorboard',localtime+'_'+opt.savename) +TBGlobalWriter = SummaryWriter(tensorboard_savedir) +net = BVDNet.BVDNet(opt.N) + + +if opt.use_gpu != '-1' and len(opt.use_gpu) == 1: + torch.backends.cudnn.benchmark = True + net.cuda() +elif opt.use_gpu != '-1' and len(opt.use_gpu) > 1: + torch.backends.cudnn.benchmark = True + net = nn.DataParallel(net) + net.cuda() + +optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) +lossf_L1 = nn.L1Loss() +lossf_VGG = BVDNet.VGGLoss(opt.use_gpu) + +videolist_tmp = os.listdir(opt.dataset) +videolist = [] +for video in videolist_tmp: + if os.path.isdir(os.path.join(opt.dataset,video)): + if len(os.listdir(os.path.join(opt.dataset,video,'mask')))>=opt.M: + videolist.append(video) +videolist.sort() +videolist_train = videolist[:int(len(videolist)*0.8)].copy() +videolist_eval = videolist[int(len(videolist)*0.8):].copy() + +dataloader_train = DataLoader(opt, videolist_train) +dataloader_train.load_init() +dataloader_eval = DataLoader(opt, videolist_eval) +dataloader_eval.load_init() + +previous_predframe_train = 0 +previous_predframe_eval = 0 +for train_iter in range(dataloader_train.n_iter): + t_start = time.time() + # train + ori_stream,mosaic_stream,last_frame = dataloader_train.get_data() + ori_stream = data.to_tensor(ori_stream, opt.use_gpu) + mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) + if last_frame is None: + last_frame = data.to_tensor(previous_predframe_train, opt.use_gpu) + else: + last_frame = data.to_tensor(last_frame, opt.use_gpu) + optimizer.zero_grad() + out = net(mosaic_stream,last_frame) + loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) + loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG + TBGlobalWriter.add_scalars('loss/train', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) + loss = loss_L1+loss_VGG + loss.backward() + optimizer.step() + previous_predframe_train = out.detach().cpu().numpy() + + # save network + if train_iter%opt.save_freq == 0 and train_iter != 0: + model_util.save(net, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.use_gpu) + + # psnr + if train_iter%opt.psnr_freq ==0: + psnr = 0 + for i in range(len(out)): + psnr += impro.psnr(data.tensor2im(out,batch_index=i), data.tensor2im(ori_stream[:,:,opt.N],batch_index=i)) + TBGlobalWriter.add_scalars('psnr', {'train':psnr/len(out)}, train_iter) + + if train_iter % opt.showresult_freq == 0: + show_imgs = [] + for i in range(opt.showresult_num): + show_imgs += [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False,batch_index=i), + data.tensor2im(out,rgb2bgr = False,batch_index=i),
+ data.tensor2im(ori_stream[:,:,opt.N],rgb2bgr = False,batch_index=i)] + show_img = impro.splice(show_imgs, (opt.showresult_num,3)) + TBGlobalWriter.add_image('train', show_img,train_iter,dataformats='HWC') + + # eval + if (train_iter)%5 ==0: + ori_stream,mosaic_stream,last_frame = dataloader_eval.get_data() + ori_stream = data.to_tensor(ori_stream, opt.use_gpu) + mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) + if last_frame is None: + last_frame = data.to_tensor(previous_predframe_eval, opt.use_gpu) + else: + last_frame = data.to_tensor(last_frame, opt.use_gpu) + with torch.no_grad(): + out = net(mosaic_stream,last_frame) + loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) + loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG + TBGlobalWriter.add_scalars('loss/eval', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) + previous_predframe_eval = out.detach().cpu().numpy() + + #psnr + if (train_iter)%opt.psnr_freq ==0: + psnr = 0 + for i in range(len(out)): + psnr += impro.psnr(data.tensor2im(out,batch_index=i), data.tensor2im(ori_stream[:,:,opt.N],batch_index=i)) + TBGlobalWriter.add_scalars('psnr', {'eval':psnr/len(out)}, train_iter) + + if train_iter % opt.showresult_freq == 0: + show_imgs = [] + for i in range(opt.showresult_num): + show_imgs += [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False,batch_index=i), + data.tensor2im(out,rgb2bgr = False,batch_index=i), + data.tensor2im(ori_stream[:,:,opt.N],rgb2bgr = False,batch_index=i)] + show_img = impro.splice(show_imgs, (opt.showresult_num,3)) + TBGlobalWriter.add_image('eval', show_img,train_iter,dataformats='HWC') + t_end = time.time() + print('iter:{0:d} t:{1:.2f} l1:{2:.4f} vgg:{3:.4f} psnr:{4:.2f}'.format(train_iter,t_end-t_start, + loss_L1.item(),loss_VGG.item(),psnr/len(out)) ) + t_strat = time.time() + + # test + test_dir = '../../datasets/video_test' + if train_iter % opt.showresult_freq == 0 and os.path.isdir(test_dir): + show_imgs = [] + videos = os.listdir(test_dir) + sorted(videos) + for video in videos: + frames = os.listdir(os.path.join(test_dir,video,'image')) + sorted(frames) + mosaic_stream = [] + for i in range(opt.T): + _mosaic = impro.imread(os.path.join(test_dir,video,'image',frames[i*opt.S]),loadsize=opt.finesize,rgb=True) + mosaic_stream.append(_mosaic) + previous = impro.imread(os.path.join(test_dir,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) + mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 + mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) + previous = data.im2tensor(previous,bgr2rgb = False, use_gpu = opt.use_gpu,use_transform = False, is0_1 = False) + with torch.no_grad(): + out = net(mosaic_stream,previous) + show_imgs+= [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False),data.tensor2im(out,rgb2bgr = False)] + + show_img = impro.splice(show_imgs, (len(videos),2)) + TBGlobalWriter.add_image('test', show_img,train_iter,dataformats='HWC') diff --git a/train/clean/train_old.py b/train/clean/train_old.py new file mode 100644 index 0000000..70efb41 --- /dev/null +++ b/train/clean/train_old.py @@ -0,0 +1,310 @@ +import os +import sys +sys.path.append("..") +sys.path.append("../..") +from cores import Options +opt = Options() + +import numpy as np +import cv2 +import random +import torch +import torch.nn as nn +import time +from multiprocessing import Process, Queue + +from util import mosaic,util,ffmpeg,filt,data 
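+# NOTE: train_old.py is a verbatim copy (blob 70efb41) of the previous train/clean/train.py, kept for reference after the BVDNet trainer replaced it.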
+from util import image_processing as impro +from models import pix2pix_model,pix2pixHD_model,video_model,unet_model,loadmodel,videoHD_model +import matplotlib +matplotlib.use('Agg') +from matplotlib import pyplot as plt +import torch.backends.cudnn as cudnn + +''' +--------------------------Get options-------------------------- +''' +opt.parser.add_argument('--N',type=int,default=25, help='') +opt.parser.add_argument('--lr',type=float,default=0.0002, help='') +opt.parser.add_argument('--beta1',type=float,default=0.5, help='') +opt.parser.add_argument('--gan', action='store_true', help='if specified, use gan') +opt.parser.add_argument('--l2', action='store_true', help='if specified, use L2 loss') +opt.parser.add_argument('--hd', action='store_true', help='if specified, use HD model') +opt.parser.add_argument('--lambda_L1',type=float,default=100, help='') +opt.parser.add_argument('--lambda_gan',type=float,default=1, help='') +opt.parser.add_argument('--finesize',type=int,default=256, help='') +opt.parser.add_argument('--loadsize',type=int,default=286, help='') +opt.parser.add_argument('--batchsize',type=int,default=1, help='') +opt.parser.add_argument('--norm',type=str,default='instance', help='') +opt.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') +opt.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') +opt.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') +opt.parser.add_argument('--image_pool',type=int,default=8, help='number of image load pool') +opt.parser.add_argument('--load_process',type=int,default=4, help='number of process for loading data') + +opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') +opt.parser.add_argument('--maxiter',type=int,default=10000000, help='') +opt.parser.add_argument('--savefreq',type=int,default=10000, help='') +opt.parser.add_argument('--startiter',type=int,default=0, help='') +opt.parser.add_argument('--continue_train', action='store_true', help='') +opt.parser.add_argument('--savename',type=str,default='face', help='') + + +''' +--------------------------Init-------------------------- +''' +opt = opt.getparse() +dir_checkpoint = os.path.join('checkpoints/',opt.savename) +util.makedirs(dir_checkpoint) +util.writelog(os.path.join(dir_checkpoint,'loss.txt'), + str(time.asctime(time.localtime(time.time())))+'\n'+util.opt2str(opt)) +cudnn.benchmark = True + +N = opt.N +loss_sum = [0.,0.,0.,0.,0.,0] +loss_plot = [[],[],[],[]] +item_plot = [] + +# list video dir +videonames = os.listdir(opt.dataset) +videonames.sort() +lengths = [];tmp = [] +print('Check dataset...') +for video in videonames: + if video != 'opt.txt': + video_images = os.listdir(os.path.join(opt.dataset,video,'origin_image')) + lengths.append(len(video_images)) + tmp.append(video) +videonames = tmp +video_num = len(videonames) + +#--------------------------Init network-------------------------- +print('Init network...') +if opt.hd: + netG = videoHD_model.MosaicNet(3*N+1, 3, norm=opt.norm) +else: + netG = video_model.MosaicNet(3*N+1, 3, norm=opt.norm) +netG.cuda() +loadmodel.show_paramsnumber(netG,'netG') + +if opt.gan: + if opt.hd: + netD = pix2pixHD_model.define_D(6, 64, opt.n_layers_D, norm = opt.norm, use_sigmoid=False, num_D=opt.num_D,getIntermFeat=True) + else: + netD = pix2pix_model.define_D(3*2, 64, 'basic', norm = opt.norm) + netD.cuda() + netD.train() + +#--------------------------continue 
train-------------------------- +if opt.continue_train: + if not os.path.isfile(os.path.join(dir_checkpoint,'last_G.pth')): + opt.continue_train = False + print('can not load last_G, training on init weight.') +if opt.continue_train: + netG.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_G.pth'))) + if opt.gan: + netD.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_D.pth'))) + f = open(os.path.join(dir_checkpoint,'iter'),'r') + opt.startiter = int(f.read()) + f.close() + +#--------------------------optimizer & loss-------------------------- +optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) +criterion_L1 = nn.L1Loss() +criterion_L2 = nn.MSELoss() +if opt.gan: + optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) + if opt.hd: + criterionGAN = pix2pixHD_model.GANLoss(tensor=torch.cuda.FloatTensor).cuda() + criterionFeat = pix2pixHD_model.GAN_Feat_loss(opt) + criterionVGG = pix2pixHD_model.VGGLoss([opt.use_gpu]) + else: + criterionGAN = pix2pix_model.GANLoss(gan_mode='lsgan').cuda() + +''' +--------------------------preload data & data pool-------------------------- +''' +print('Preloading data, please wait...') +def preload(pool): + cnt = 0 + input_imgs = torch.rand(opt.batchsize,N*3+1,opt.finesize,opt.finesize) + ground_trues = torch.rand(opt.batchsize,3,opt.finesize,opt.finesize) + while 1: + try: + for i in range(opt.batchsize): + video_index = random.randint(0,video_num-1) + videoname = videonames[video_index] + img_index = random.randint(int(N/2)+1,lengths[video_index]- int(N/2)-1) + input_imgs[i],ground_trues[i] = data.load_train_video(videoname,img_index,opt) + cnt += 1 + pool.put([input_imgs,ground_trues]) + except Exception as e: + print("Error:",videoname,e) +pool = Queue(opt.image_pool) +for i in range(opt.load_process): + p = Process(target=preload,args=(pool,)) + p.daemon = True + p.start() + +''' +--------------------------train-------------------------- +''' +util.copyfile('./train.py', os.path.join(dir_checkpoint,'train.py')) +util.copyfile('../../models/videoHD_model.py', os.path.join(dir_checkpoint,'model.py')) +netG.train() +time_start=time.time() +print("Begin training...") +for iter in range(opt.startiter+1,opt.maxiter): + + inputdata,target = pool.get() + inputdata,target = inputdata.cuda(),target.cuda() + + if opt.gan: + # compute fake images: G(A) + pred = netG(inputdata) + real_A = inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:] + + # --------------------update D-------------------- + pix2pix_model.set_requires_grad(netD,True) + optimizer_D.zero_grad() + # Fake + fake_AB = torch.cat((real_A, pred), 1) + pred_fake = netD(fake_AB.detach()) + loss_D_fake = criterionGAN(pred_fake, False) + # Real + real_AB = torch.cat((real_A, target), 1) + pred_real = netD(real_AB) + loss_D_real = criterionGAN(pred_real, True) + # combine loss and calculate gradients + loss_D = (loss_D_fake + loss_D_real) * 0.5 + loss_sum[4] += loss_D_fake.item() + loss_sum[5] += loss_D_real.item() + # udpate D's weights + loss_D.backward() + optimizer_D.step() + + # --------------------update G-------------------- + pix2pix_model.set_requires_grad(netD,False) + optimizer_G.zero_grad() + + # First, G(A) should fake the discriminator + fake_AB = torch.cat((real_A, pred), 1) + pred_fake = netD(fake_AB) + loss_G_GAN = criterionGAN(pred_fake, True)*opt.lambda_gan + + # combine loss and calculate gradients + if opt.l2: + loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 + else: 
+ loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 + + if opt.hd: + real_AB = torch.cat((real_A, target), 1) + pred_real = netD(real_AB) + loss_G_GAN_Feat = criterionFeat(pred_fake,pred_real) + loss_VGG = criterionVGG(pred, target) * opt.lambda_feat + loss_G = loss_G_GAN + loss_G_L1 + loss_G_GAN_Feat + loss_VGG + else: + loss_G = loss_G_GAN + loss_G_L1 + loss_sum[0] += loss_G_L1.item() + loss_sum[1] += loss_G_GAN.item() + loss_sum[2] += loss_G_GAN_Feat.item() + loss_sum[3] += loss_VGG.item() + + # udpate G's weights + loss_G.backward() + optimizer_G.step() + + else: + pred = netG(inputdata) + if opt.l2: + loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 + else: + loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 + loss_sum[0] += loss_G_L1.item() + + optimizer_G.zero_grad() + loss_G_L1.backward() + optimizer_G.step() + + # save train result + if (iter+1)%1000 == 0: + try: + data.showresult(inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:], + target, pred, os.path.join(dir_checkpoint,'result_train.jpg')) + except Exception as e: + print(e) + + # plot + if (iter+1)%1000 == 0: + time_end = time.time() + #if opt.gan: + savestr ='iter:{0:d} L1_loss:{1:.3f} GAN_loss:{2:.3f} Feat:{3:.3f} VGG:{4:.3f} time:{5:.2f}'.format( + iter+1,loss_sum[0]/1000,loss_sum[1]/1000,loss_sum[2]/1000,loss_sum[3]/1000,(time_end-time_start)/1000) + util.writelog(os.path.join(dir_checkpoint,'loss.txt'), savestr,True) + if (iter+1)/1000 >= 10: + for i in range(4):loss_plot[i].append(loss_sum[i]/1000) + item_plot.append(iter+1) + try: + labels = ['L1_loss','GAN_loss','GAN_Feat_loss','VGG_loss'] + for i in range(4):plt.plot(item_plot,loss_plot[i],label=labels[i]) + plt.xlabel('iter') + plt.legend(loc=1) + plt.savefig(os.path.join(dir_checkpoint,'loss.jpg')) + plt.close() + except Exception as e: + print("error:",e) + + loss_sum = [0.,0.,0.,0.,0.,0.] 
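+        # restart the wall-clock timer for the next 1000-iteration logging window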
+ time_start=time.time() + + # save network + if (iter+1)%(opt.savefreq//10) == 0: + torch.save(netG.cpu().state_dict(),os.path.join(dir_checkpoint,'last_G.pth')) + if opt.gan: + torch.save(netD.cpu().state_dict(),os.path.join(dir_checkpoint,'last_D.pth')) + if opt.use_gpu !=-1 : + netG.cuda() + if opt.gan: + netD.cuda() + f = open(os.path.join(dir_checkpoint,'iter'),'w+') + f.write(str(iter+1)) + f.close() + + if (iter+1)%opt.savefreq == 0: + os.rename(os.path.join(dir_checkpoint,'last_G.pth'),os.path.join(dir_checkpoint,str(iter+1)+'G.pth')) + if opt.gan: + os.rename(os.path.join(dir_checkpoint,'last_D.pth'),os.path.join(dir_checkpoint,str(iter+1)+'D.pth')) + print('network saved.') + + #test + if (iter+1)%opt.savefreq == 0: + if os.path.isdir('./test'): + netG.eval() + + test_names = os.listdir('./test') + test_names.sort() + result = np.zeros((opt.finesize*2,opt.finesize*len(test_names),3), dtype='uint8') + + for cnt,test_name in enumerate(test_names,0): + img_names = os.listdir(os.path.join('./test',test_name,'image')) + img_names.sort() + inputdata = np.zeros((opt.finesize,opt.finesize,3*N+1), dtype='uint8') + for i in range(0,N): + img = impro.imread(os.path.join('./test',test_name,'image',img_names[i])) + img = impro.resize(img,opt.finesize) + inputdata[:,:,i*3:(i+1)*3] = img + + mask = impro.imread(os.path.join('./test',test_name,'mask.png'),'gray') + mask = impro.resize(mask,opt.finesize) + mask = impro.mask_threshold(mask,15,128) + inputdata[:,:,-1] = mask + result[0:opt.finesize,opt.finesize*cnt:opt.finesize*(cnt+1),:] = inputdata[:,:,int((N-1)/2)*3:(int((N-1)/2)+1)*3] + inputdata = data.im2tensor(inputdata,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False) + pred = netG(inputdata) + + pred = data.tensor2im(pred,rgb2bgr = False, is0_1 = False) + result[opt.finesize:opt.finesize*2,opt.finesize*cnt:opt.finesize*(cnt+1),:] = pred + + cv2.imwrite(os.path.join(dir_checkpoint,str(iter+1)+'_test.jpg'), result) + netG.train() \ No newline at end of file diff --git a/util/data.py b/util/data.py index c76dfa3..8884047 100755 --- a/util/data.py +++ b/util/data.py @@ -10,11 +10,18 @@ transforms.ToTensor(), transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) ] -) +) -def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = False): +def to_tensor(data,gpu_id): + data = torch.from_numpy(data) + if gpu_id != '-1': + data = data.cuda() + return data + + +def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = False, batch_index=0): image_tensor =image_tensor.data - image_numpy = image_tensor[0].cpu().float().numpy() + image_numpy = image_tensor[batch_index].cpu().float().numpy() if not is0_1: image_numpy = (image_numpy + 1)/2.0 @@ -58,7 +65,7 @@ def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = image_tensor = torch.from_numpy(image_numpy).float() if reshape: image_tensor = image_tensor.reshape(1,ch,h,w) - if use_gpu != -1: + if use_gpu != '-1': image_tensor = image_tensor.cuda() return image_tensor @@ -68,53 +75,53 @@ def shuffledata(data,target): np.random.set_state(state) np.random.shuffle(target) -def random_transform_video(src,target,finesize,N): - #random blur - if random.random()<0.2: - h,w = src.shape[:2] - src = src[:8*(h//8),:8*(w//8)] - Q_ran = random.randint(1,15) - src[:,:,:3*N] = impro.dctblur(src[:,:,:3*N],Q_ran) - target = impro.dctblur(target,Q_ran) +# def random_transform_video(src,target,finesize,N): +# #random blur +# if random.random()<0.2: +# h,w = 
src.shape[:2] +# src = src[:8*(h//8),:8*(w//8)] +# Q_ran = random.randint(1,15) +# src[:,:,:3*N] = impro.dctblur(src[:,:,:3*N],Q_ran) +# target = impro.dctblur(target,Q_ran) - #random crop - h,w = target.shape[:2] - h_move = int((h-finesize)*random.random()) - w_move = int((w-finesize)*random.random()) - target = target[h_move:h_move+finesize,w_move:w_move+finesize,:] - src = src[h_move:h_move+finesize,w_move:w_move+finesize,:] +# #random crop +# h,w = target.shape[:2] +# h_move = int((h-finesize)*random.random()) +# w_move = int((w-finesize)*random.random()) +# target = target[h_move:h_move+finesize,w_move:w_move+finesize,:] +# src = src[h_move:h_move+finesize,w_move:w_move+finesize,:] - #random flip - if random.random()<0.5: - src = src[:,::-1,:] - target = target[:,::-1,:] +# #random flip +# if random.random()<0.5: +# src = src[:,::-1,:] +# target = target[:,::-1,:] - #random color - alpha = random.uniform(-0.1,0.1) - beta = random.uniform(-0.1,0.1) - b = random.uniform(-0.05,0.05) - g = random.uniform(-0.05,0.05) - r = random.uniform(-0.05,0.05) - for i in range(N): - src[:,:,i*3:(i+1)*3] = impro.color_adjust(src[:,:,i*3:(i+1)*3],alpha,beta,b,g,r) - target = impro.color_adjust(target,alpha,beta,b,g,r) - - #random resize blur - if random.random()<0.5: - interpolations = [cv2.INTER_LINEAR,cv2.INTER_CUBIC,cv2.INTER_LANCZOS4] - size_ran = random.uniform(0.7,1.5) - interpolation_up = interpolations[random.randint(0,2)] - interpolation_down =interpolations[random.randint(0,2)] +# #random color +# alpha = random.uniform(-0.1,0.1) +# beta = random.uniform(-0.1,0.1) +# b = random.uniform(-0.05,0.05) +# g = random.uniform(-0.05,0.05) +# r = random.uniform(-0.05,0.05) +# for i in range(N): +# src[:,:,i*3:(i+1)*3] = impro.color_adjust(src[:,:,i*3:(i+1)*3],alpha,beta,b,g,r) +# target = impro.color_adjust(target,alpha,beta,b,g,r) + +# #random resize blur +# if random.random()<0.5: +# interpolations = [cv2.INTER_LINEAR,cv2.INTER_CUBIC,cv2.INTER_LANCZOS4] +# size_ran = random.uniform(0.7,1.5) +# interpolation_up = interpolations[random.randint(0,2)] +# interpolation_down =interpolations[random.randint(0,2)] - tmp = cv2.resize(src[:,:,:3*N], (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) - src[:,:,:3*N] = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) +# tmp = cv2.resize(src[:,:,:3*N], (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) +# src[:,:,:3*N] = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) - tmp = cv2.resize(target, (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) - target = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) +# tmp = cv2.resize(target, (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) +# target = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) - return src,target +# return src,target -def random_transform_single(img,out_shape): +def random_transform_single_mask(img,out_shape): out_h,out_w = out_shape img = cv2.resize(img,(int(out_w*random.uniform(1.1, 1.5)),int(out_h*random.uniform(1.1, 1.5)))) h,w = img.shape[:2] @@ -130,7 +137,72 @@ def random_transform_single(img,out_shape): img = cv2.resize(img,(out_w,out_h)) return img -def random_transform_image(img,mask,finesize,test_flag = False): +def get_transform_params(): + scale_flag = np.random.random()<0.2 + crop_flag = True + rotat_flag = np.random.random()<0.2 + color_flag = True + flip_flag = np.random.random()<0.2 + 
blur_flag = np.random.random()<0.1 + flag_dict = {'scale':scale_flag,'crop':crop_flag,'rotat':rotat_flag,'color':color_flag, + 'flip':flip_flag,'blur':blur_flag} + + scale_rate = np.random.uniform(0.9,1.1) + crop_rate = [np.random.random(),np.random.random()] + rotat_rate = np.random.random() + color_rate = [np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05), + np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05)] + flip_rate = np.random.random() + blur_rate = np.random.randint(1,15) + rate_dict = {'scale':scale_rate,'crop':crop_rate,'rotat':rotat_rate,'color':color_rate, + 'flip':flip_rate,'blur':blur_rate} + + return {'flag':flag_dict,'rate':rate_dict} + +def random_transform_single_image(img,finesize,params=None,test_flag = False): + if params is None: + params = get_transform_params() + if test_flag: + params['flag']['scale'] = False + if params['flag']['scale']: + h,w = img.shape[:2] + loadsize = min((h,w)) + a = (float(h)/float(w))*params['rate']['scale'] + if horiginal @@ -43,6 +43,9 @@ def imread(file_path,mod = 'normal',loadsize = 0): if loadsize != 0: img = resize(img, loadsize, interpolation=cv2.INTER_CUBIC) + if rgb and img.ndim==3: + img = img[:,:,::-1] + return img def imwrite(file_path,img): @@ -252,4 +255,27 @@ def replace_mosaic(img_origin,img_fake,mask,x,y,size,no_feather): img_result = img_origin.copy() img_result = (img_origin*(1-mask)+img_tmp*mask).astype('uint8') - return img_result \ No newline at end of file + return img_result + +def psnr(img1,img2): + mse = np.mean((img1/255.0-img2/255.0)**2) + if mse < 1e-10: + return 100 + psnr_v = 20*np.log10(1/np.sqrt(mse)) + return psnr_v + +def splice(imgs,splice_shape): + '''Stitching multiple images, all imgs must have the same size + imgs : [img1,img2,img3,img4] + splice_shape: (2,2) + ''' + h,w,ch = imgs[0].shape + output = np.zeros((h*splice_shape[0],w*splice_shape[1],ch),np.uint8) + cnt = 0 + for i in range(splice_shape[0]): + for j in range(splice_shape[1]): + if cnt < len(imgs): + output[h*i:h*(i+1),w*j:w*(j+1)] = imgs[cnt] + cnt += 1 + return output + diff --git a/util/util.py b/util/util.py index 571974e..4952df8 100755 --- a/util/util.py +++ b/util/util.py @@ -1,4 +1,6 @@ import os +import random +import string import shutil def Traversal(filedir): @@ -10,6 +12,9 @@ def Traversal(filedir): Traversal(dir) return file_list +def randomstr(num): + return ''.join(random.sample(string.ascii_letters + string.digits, num)) + def is_img(path): ext = os.path.splitext(path)[1] ext = ext.lower() From 8f4e9158d1b0ded134607e4f3f6ec68330788928 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Sun, 18 Apr 2021 21:41:42 +0800 Subject: [PATCH 2/9] Fix frame leak --- deepmosaic.py | 2 +- models/BVDNet.py | 44 ++++++++++++------------ models/model_util.py | 46 ++++++++++++++++++++++++- train/clean/train.py | 82 ++++++++++++++++++++------------------------ 4 files changed, 106 insertions(+), 68 deletions(-) diff --git a/deepmosaic.py b/deepmosaic.py index cfc8717..2571d38 100644 --- a/deepmosaic.py +++ b/deepmosaic.py @@ -76,7 +76,7 @@ def main(): except Exception as ex: print('--------------------ERROR--------------------') print('--------------Environment--------------') - print('DeepMosaics: 0.4.0') + print('DeepMosaics: 0.5.0') print('Python:',sys.version) import torch print('Pytorch:',torch.__version__) diff --git a/models/BVDNet.py b/models/BVDNet.py index eb26f04..ab5adeb 100644 --- a/models/BVDNet.py +++ b/models/BVDNet.py @@ -2,13 +2,13 @@ import torch.nn as nn import 
torch.nn.functional as F from .pix2pixHD_model import * +from .model_util import * class Encoder2d(nn.Module): - def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm2d): + def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm2d, activation = nn.ReLU(True)): super(Encoder2d, self).__init__() - activation = nn.ReLU(True) - + model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] ### downsample for i in range(n_downsampling): @@ -22,10 +22,9 @@ def forward(self, input): return self.model(input) class Encoder3d(nn.Module): - def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm3d): + def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm3d,activation = nn.ReLU(True)): super(Encoder3d, self).__init__() - activation = nn.ReLU(True) - + model = [nn.Conv3d(input_nc, ngf, kernel_size=3, padding=1), norm_layer(ngf), activation] ### downsample for i in range(n_downsampling): @@ -39,17 +38,18 @@ def forward(self, input): return self.model(input) class BVDNet(nn.Module): - def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3): + def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3,norm='batch',activation=nn.LeakyReLU(0.2)): super(BVDNet, self).__init__() ngf = 64 padding_type = 'reflect' - norm_layer = nn.BatchNorm2d + norm_layer = get_norm_layer(norm,'2d') + norm_layer_3d = get_norm_layer(norm,'3d') self.N = N - # encoder - self.encoder3d = Encoder3d(input_nc,64,n_downsampling) - self.encoder2d = Encoder2d(input_nc,64,n_downsampling) + # encoder + self.encoder3d = Encoder3d(input_nc,64,n_downsampling,norm_layer_3d,activation) + self.encoder2d = Encoder2d(input_nc,64,n_downsampling,norm_layer,activation) ### resnet blocks self.blocks = [] @@ -62,31 +62,31 @@ def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3): self.decoder = [] for i in range(n_downsampling): mult = 2**(n_downsampling - i) - # self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), - # norm_layer(int(ngf * mult / 2)), nn.ReLU(True)] - self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'), - nn.ReflectionPad2d(1), - nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), - norm_layer(int(ngf * mult / 2)), - nn.ReLU(True)] + self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), + norm_layer(int(ngf * mult / 2)), activation] + # self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'), + # nn.ReflectionPad2d(1), + # nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), + # norm_layer(int(ngf * mult / 2)), + # activation] self.decoder += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] self.decoder = nn.Sequential(*self.decoder) self.limiter = nn.Tanh() - def forward(self, stream, last): + def forward(self, stream, previous): this_shortcut = stream[:,:,self.N] stream = self.encoder3d(stream) stream = stream.reshape(stream.size(0),stream.size(1),stream.size(3),stream.size(4)) # print(stream.shape) - last = self.encoder2d(last) - x = stream + last + previous = self.encoder2d(previous) + x = stream + previous x = self.blocks(x) x = self.decoder(x) x = x+this_shortcut x = self.limiter(x) #print(x.shape) - # print(stream.shape,last.shape) + # print(stream.shape,previous.shape) return x class 
VGGLoss(nn.Module): diff --git a/models/model_util.py b/models/model_util.py index 8f1789f..8865d74 100644 --- a/models/model_util.py +++ b/models/model_util.py @@ -1,5 +1,8 @@ import torch import torch.nn as nn +from torch.nn import init +import functools + def save(net,path,gpu_id): if isinstance(net, nn.DataParallel): @@ -7,4 +10,45 @@ def save(net,path,gpu_id): else: torch.save(net.cpu().state_dict(),path) if gpu_id != '-1': - net.cuda() \ No newline at end of file + net.cuda() + +def get_norm_layer(norm_type='instance',mod = '2d'): + if norm_type == 'batch': + if mod == '2d': + norm_layer = functools.partial(nn.BatchNorm2d, affine=True) + elif mod == '3d': + norm_layer = functools.partial(nn.BatchNorm3d, affine=True) + elif norm_type == 'instance': + if mod == '2d': + norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=True) + elif mod =='3d': + norm_layer = functools.partial(nn.InstanceNorm3d, affine=False, track_running_stats=True) + elif norm_type == 'none': + norm_layer = None + else: + raise NotImplementedError('normalization layer [%s] is not found' % norm_type) + + return norm_layer + +def init_weights(net, init_type='normal', gain=0.02): + def init_func(m): + classname = m.__class__.__name__ + if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1): + if init_type == 'normal': + init.normal_(m.weight.data, 0.0, gain) + elif init_type == 'xavier': + init.xavier_normal_(m.weight.data, gain=gain) + elif init_type == 'kaiming': + init.kaiming_normal_(m.weight.data, a=0, mode='fan_in') + elif init_type == 'orthogonal': + init.orthogonal_(m.weight.data, gain=gain) + else: + raise NotImplementedError('initialization method [%s] is not implemented' % init_type) + if hasattr(m, 'bias') and m.bias is not None: + init.constant_(m.bias.data, 0.0) + elif classname.find('BatchNorm2d') != -1: + init.normal_(m.weight.data, 1.0, gain) + init.constant_(m.bias.data, 0.0) + + print('initialize network with %s' % init_type) + net.apply(init_func) \ No newline at end of file diff --git a/train/clean/train.py b/train/clean/train.py index 49c886f..a944cd6 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -36,6 +36,7 @@ opt.parser.add_argument('--load_thread',type=int,default=4, help='number of thread for loading data') opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') +opt.parser.add_argument('--dataset_test',type=str,default='./datasets/face_test/', help='') opt.parser.add_argument('--n_epoch',type=int,default=200, help='') opt.parser.add_argument('--save_freq',type=int,default=100000, help='') opt.parser.add_argument('--continue_train', action='store_true', help='') @@ -46,9 +47,11 @@ class TrainVideoLoader(object): """docstring for VideoLoader + Load a single video(Converted to images) + How to use: 1.Init TrainVideoLoader as loader 2.Get data by loader.ori_stream - 3.loader.next() + 3.loader.next() to get next stream """ def __init__(self, opt, video_dir, test_flag=False): super(TrainVideoLoader, self).__init__() @@ -60,7 +63,7 @@ def __init__(self, opt, video_dir, test_flag=False): self.transform_params = data.get_transform_params() self.ori_load_pool = [] self.mosaic_load_pool = [] - self.last_pred = None + self.previous_pred = None feg_ori = impro.imread(os.path.join(video_dir,'origin_image','00001.jpg'),loadsize=self.opt.loadsize,rgb=True) feg_mask = impro.imread(os.path.join(video_dir,'mask','00001.png'),mod='gray',loadsize=self.opt.loadsize) 
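# One mosaic style is sampled per clip and then reused for every frame, so the
# mosaic pattern stays temporally consistent while still varying across clips.
# A minimal sketch of what mosaic.get_random_parameter is assumed to return,
# inferred only from how its outputs are unpacked below (the real ranges live
# in util/mosaic.py and may differ; roi_size is a hypothetical name here):
#   mosaic_size = random.randint(roi_size//14, roi_size//8)   # mosaic cell size in px
#   mod         = random.choice(['squa_avg','rect_avg'])      # mosaic style
#   rect_rat    = random.uniform(1.1, 1.6)                    # rectangle aspect ratio
#   feather     = random.randint(-1, mosaic_size*2)           # -1 = hard edge, else feathered
#   return mosaic_size, mod, rect_rat, feather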
self.mosaic_size,self.mod,self.rect_rat,self.feather = mosaic.get_random_parameter(feg_ori,feg_mask) @@ -72,8 +75,6 @@ def __init__(self, opt, video_dir, test_flag=False): _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) - # _ori_img = data.random_transform_single_image(_ori_img, opt.finesize,self.transform_params,self.test_flag) - # _mosaic_img = data.random_transform_single_image(_mosaic_img, opt.finesize,self.transform_params,self.test_flag) self.ori_load_pool.append(self.normalize(_ori_img)) self.mosaic_load_pool.append(self.normalize(_mosaic_img)) self.ori_load_pool = np.array(self.ori_load_pool) @@ -87,28 +88,29 @@ def __init__(self, opt, video_dir, test_flag=False): self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) #Init frist previous frame - self.last_pred = self.ori_load_pool[self.opt.S*self.opt.N-1].copy() + self.previous_pred = self.ori_load_pool[self.opt.S*self.opt.N-1].copy() # previous B,C,H,W - self.last_pred = self.last_pred.reshape(1,opt.finesize,opt.finesize,3).transpose((0,3,1,2)) + self.previous_pred = self.previous_pred.reshape(1,opt.finesize,opt.finesize,3).transpose((0,3,1,2)) def normalize(self,data): + ''' + normalize to -1 ~ 1 + ''' return (data.astype(np.float32)/255.0-0.5)/0.5 + + def anti_normalize(self,data): + return np.clip((data*0.5+0.5)*255,0,255).astype(np.uint8) def next(self): if self.t != 0: - self.last_pred = None + self.previous_pred = None self.ori_load_pool [:self.opt.S*self.opt.T-1] = self.ori_load_pool [1:self.opt.S*self.opt.T] self.mosaic_load_pool[:self.opt.S*self.opt.T-1] = self.mosaic_load_pool[1:self.opt.S*self.opt.T] #print(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg')) _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) - # if np.random.random() < 0.01: - # print('1') - # cv2.imwrite(util.randomstr(10)+'.jpg', _ori_img) - # _ori_img = data.random_transform_single_image(_ori_img, opt.finesize,self.transform_params,self.test_flag) - # _mosaic_img = data.random_transform_single_image(_mosaic_img, opt.finesize,self.transform_params,self.test_flag) _ori_img,_mosaic_img = self.normalize(_ori_img),self.normalize(_mosaic_img) self.ori_load_pool [self.opt.S*self.opt.T-1] = _ori_img self.mosaic_load_pool[self.opt.S*self.opt.T-1] = _mosaic_img @@ -116,11 +118,6 @@ def next(self): self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() - if np.random.random() < 0.01: - # print(self.ori_stream[0,0].shape) - print('1') - cv2.imwrite(util.randomstr(10)+'.jpg', self.ori_stream[0]) - # stream B,T,H,W,C -> B,C,T,H,W self.ori_stream = self.ori_stream.reshape 
(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) @@ -141,8 +138,9 @@ def __init__(self, opt, videolist, test_flag=False): self.n_iter = len(self.videolist)//self.opt.load_thread//self.opt.batchsize*self.each_video_n_iter*self.opt.load_thread self.queue = Queue(self.opt.load_thread) self.ori_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W - self.mosaic_stream = self.ori_stream.copy() - self.last_pred = np.zeros((self.opt.batchsize,3,self.opt.finesize,self.opt.finesize),dtype=np.float32) + self.mosaic_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W + self.previous_pred = np.zeros((self.opt.batchsize,3,self.opt.finesize,self.opt.finesize),dtype=np.float32) + self.load_init() def load(self,videolist): for load_video_iter in range(len(videolist)//self.opt.batchsize): @@ -153,12 +151,12 @@ def load(self,videolist): self.ori_stream[i] = videoloaders[i].ori_stream self.mosaic_stream[i] = videoloaders[i].mosaic_stream if each_video_iter == 0: - self.last_pred[i] = videoloaders[i].last_pred + self.previous_pred[i] = videoloaders[i].previous_pred videoloaders[i].next() if each_video_iter == 0: - self.queue.put([self.ori_stream,self.mosaic_stream,self.last_pred]) + self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),self.previous_pred]) else: - self.queue.put([self.ori_stream,self.mosaic_stream,None]) + self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),None]) def load_init(self): ptvn = len(self.videolist)//self.opt.load_thread #pre_thread_video_num @@ -209,31 +207,28 @@ def get_data(self): videolist_eval = videolist[int(len(videolist)*0.8):].copy() dataloader_train = DataLoader(opt, videolist_train) -dataloader_train.load_init() dataloader_eval = DataLoader(opt, videolist_eval) -dataloader_eval.load_init() -previous_predframe_train = 0 -previous_predframe_eval = 0 +previous_predframe_tmp = 0 for train_iter in range(dataloader_train.n_iter): t_start = time.time() # train - ori_stream,mosaic_stream,last_frame = dataloader_train.get_data() + ori_stream,mosaic_stream,previous_frame = dataloader_train.get_data() ori_stream = data.to_tensor(ori_stream, opt.use_gpu) mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) - if last_frame is None: - last_frame = data.to_tensor(previous_predframe_train, opt.use_gpu) + if previous_frame is None: + previous_frame = data.to_tensor(previous_predframe_tmp, opt.use_gpu) else: - last_frame = data.to_tensor(last_frame, opt.use_gpu) + previous_frame = data.to_tensor(previous_frame, opt.use_gpu) optimizer.zero_grad() - out = net(mosaic_stream,last_frame) + out = net(mosaic_stream,previous_frame) loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG TBGlobalWriter.add_scalars('loss/train', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) loss = loss_L1+loss_VGG loss.backward() optimizer.step() - previous_predframe_train = out.detach().cpu().numpy() + previous_predframe_tmp = out.detach().cpu().numpy() # save network if train_iter%opt.save_freq == 0 and train_iter != 0: @@ -257,19 +252,19 @@ def get_data(self): # eval if (train_iter)%5 ==0: - ori_stream,mosaic_stream,last_frame = dataloader_eval.get_data() + ori_stream,mosaic_stream,previous_frame = dataloader_eval.get_data() ori_stream = data.to_tensor(ori_stream, 
opt.use_gpu) mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) - if last_frame is None: - last_frame = data.to_tensor(previous_predframe_eval, opt.use_gpu) + if previous_frame is None: + previous_frame = data.to_tensor(previous_predframe_tmp, opt.use_gpu) else: - last_frame = data.to_tensor(last_frame, opt.use_gpu) + previous_frame = data.to_tensor(previous_frame, opt.use_gpu) with torch.no_grad(): - out = net(mosaic_stream,last_frame) + out = net(mosaic_stream,previous_frame) loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG TBGlobalWriter.add_scalars('loss/eval', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) - previous_predframe_eval = out.detach().cpu().numpy() + previous_predframe_tmp = out.detach().cpu().numpy() #psnr if (train_iter)%opt.psnr_freq ==0: @@ -292,19 +287,18 @@ def get_data(self): t_strat = time.time() # test - test_dir = '../../datasets/video_test' - if train_iter % opt.showresult_freq == 0 and os.path.isdir(test_dir): + if train_iter % opt.showresult_freq == 0 and os.path.isdir(opt.dataset_test): show_imgs = [] - videos = os.listdir(test_dir) + videos = os.listdir(opt.dataset_test) sorted(videos) for video in videos: - frames = os.listdir(os.path.join(test_dir,video,'image')) + frames = os.listdir(os.path.join(opt.dataset_test,video,'image')) sorted(frames) mosaic_stream = [] for i in range(opt.T): - _mosaic = impro.imread(os.path.join(test_dir,video,'image',frames[i*opt.S]),loadsize=opt.finesize,rgb=True) + _mosaic = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[i*opt.S]),loadsize=opt.finesize,rgb=True) mosaic_stream.append(_mosaic) - previous = impro.imread(os.path.join(test_dir,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) + previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) From 2bbda3510a4e315d96d6d2e1c83944f470be6b27 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Mon, 19 Apr 2021 10:21:34 +0800 Subject: [PATCH 3/9] BVDNet SpectralNorm --- .gitignore | 1 + models/BVDNet.py | 52 ++++++++++++++++++++++++++++++-------------- models/model_util.py | 42 ++++++++++++++++++++++++++++++++++- 3 files changed, 78 insertions(+), 17 deletions(-) diff --git a/.gitignore b/.gitignore index 822f94d..5ebe676 100644 --- a/.gitignore +++ b/.gitignore @@ -143,6 +143,7 @@ video_tmp/ result/ nohup.out #./ +/.vscode /pix2pix /pix2pixHD /tmp diff --git a/models/BVDNet.py b/models/BVDNet.py index ab5adeb..7734e86 100644 --- a/models/BVDNet.py +++ b/models/BVDNet.py @@ -4,17 +4,38 @@ from .pix2pixHD_model import * from .model_util import * +class UpBlock(nn.Module): + def __init__(self, in_channel, out_channel, kernel_size=3, padding=1): + super().__init__() + + self.convup = nn.Sequential( + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.ReflectionPad2d(padding), + # EqualConv2d(out_channel, out_channel, kernel_size, padding=padding), + SpectralNorm(nn.Conv2d(in_channel, out_channel, kernel_size)), + nn.LeakyReLU(0.2), + # Blur(out_channel), + ) + + + def forward(self, input): + + outup = self.convup(input) + + return outup + class Encoder2d(nn.Module): - def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm2d, activation = 
nn.ReLU(True)): + def __init__(self, input_nc, ngf=64, n_downsampling=3, activation = nn.LeakyReLU(0.2)): super(Encoder2d, self).__init__() - model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] + model = [nn.ReflectionPad2d(3), SpectralNorm(nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0)), activation] ### downsample for i in range(n_downsampling): mult = 2**i - model += [nn.ReflectionPad2d(1),nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0), - norm_layer(ngf * mult * 2), activation] + model += [ nn.ReflectionPad2d(1), + SpectralNorm(nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0)), + activation] self.model = nn.Sequential(*model) @@ -22,15 +43,15 @@ def forward(self, input): return self.model(input) class Encoder3d(nn.Module): - def __init__(self, input_nc, ngf=64, n_downsampling=3, norm_layer=nn.BatchNorm3d,activation = nn.ReLU(True)): + def __init__(self, input_nc, ngf=64, n_downsampling=3, activation = nn.LeakyReLU(0.2)): super(Encoder3d, self).__init__() - model = [nn.Conv3d(input_nc, ngf, kernel_size=3, padding=1), norm_layer(ngf), activation] + model = [SpectralNorm(nn.Conv3d(input_nc, ngf, kernel_size=3, padding=1)), activation] ### downsample for i in range(n_downsampling): mult = 2**i - model += [nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1), - norm_layer(ngf * mult * 2), activation] + model += [ SpectralNorm(nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)), + activation] self.model = nn.Sequential(*model) @@ -38,32 +59,31 @@ def forward(self, input): return self.model(input) class BVDNet(nn.Module): - def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3,norm='batch',activation=nn.LeakyReLU(0.2)): + def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3,activation=nn.LeakyReLU(0.2)): super(BVDNet, self).__init__() ngf = 64 padding_type = 'reflect' - norm_layer = get_norm_layer(norm,'2d') - norm_layer_3d = get_norm_layer(norm,'3d') self.N = N # encoder - self.encoder3d = Encoder3d(input_nc,64,n_downsampling,norm_layer_3d,activation) - self.encoder2d = Encoder2d(input_nc,64,n_downsampling,norm_layer,activation) + self.encoder3d = Encoder3d(input_nc,64,n_downsampling,activation) + self.encoder2d = Encoder2d(input_nc,64,n_downsampling,activation) ### resnet blocks self.blocks = [] mult = 2**n_downsampling for i in range(n_blocks): - self.blocks += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=nn.ReLU(True), norm_layer=norm_layer)] + self.blocks += [ResnetBlockSpectralNorm(ngf * mult, padding_type=padding_type, activation=activation)] self.blocks = nn.Sequential(*self.blocks) ### decoder self.decoder = [] for i in range(n_downsampling): mult = 2**(n_downsampling - i) - self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), - norm_layer(int(ngf * mult / 2)), activation] + self.decoder += [UpBlock(ngf * mult, int(ngf * mult / 2))] + # self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), + # norm_layer(int(ngf * mult / 2)), activation] # self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'), # nn.ReflectionPad2d(1), # nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), diff --git a/models/model_util.py b/models/model_util.py index 8865d74..f2905c7 100644 --- a/models/model_util.py +++ 
b/models/model_util.py @@ -1,6 +1,7 @@ import torch import torch.nn as nn from torch.nn import init +import torch.nn.utils.spectral_norm as SpectralNorm import functools @@ -51,4 +52,43 @@ def init_func(m): init.constant_(m.bias.data, 0.0) print('initialize network with %s' % init_type) - net.apply(init_func) \ No newline at end of file + net.apply(init_func) + +class ResnetBlockSpectralNorm(nn.Module): + def __init__(self, dim, padding_type, activation=nn.LeakyReLU(0.2), use_dropout=False): + super(ResnetBlockSpectralNorm, self).__init__() + self.conv_block = self.build_conv_block(dim, padding_type, activation, use_dropout) + + def build_conv_block(self, dim, padding_type, activation, use_dropout): + conv_block = [] + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + + conv_block += [SpectralNorm(nn.Conv2d(dim, dim, kernel_size=3, padding=p)), + activation] + if use_dropout: + conv_block += [nn.Dropout(0.5)] + + p = 0 + if padding_type == 'reflect': + conv_block += [nn.ReflectionPad2d(1)] + elif padding_type == 'replicate': + conv_block += [nn.ReplicationPad2d(1)] + elif padding_type == 'zero': + p = 1 + else: + raise NotImplementedError('padding [%s] is not implemented' % padding_type) + conv_block += [SpectralNorm(nn.Conv2d(dim, dim, kernel_size=3, padding=p))] + + return nn.Sequential(*conv_block) + + def forward(self, x): + out = x + self.conv_block(x) + return out \ No newline at end of file From 4c6b29b42ff8f1609a4cd1f93f0b056f6b26fc63 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Tue, 20 Apr 2021 14:31:58 +0800 Subject: [PATCH 4/9] Optimize the ffmpeg command, modify use_gpu to gpu_id --- cores/core.py | 2 +- cores/options.py | 10 +- make_datasets/make_pix2pix_dataset.py | 2 +- make_datasets/make_video_dataset.py | 3 +- models/loadmodel.py | 8 +- models/runmodel.py | 14 +- train/add/train.py | 12 +- train/clean/train.py | 35 +-- train/clean/train_old.py | 310 -------------------------- util/data.py | 8 +- util/ffmpeg.py | 5 +- 11 files changed, 53 insertions(+), 356 deletions(-) delete mode 100644 train/clean/train_old.py diff --git a/cores/core.py b/cores/core.py index db99217..9fb858a 100644 --- a/cores/core.py +++ b/cores/core.py @@ -283,7 +283,7 @@ def cleanmosaic_video_fusion(opt,netG,netM): mosaic_input[:,:,k*3:(k+1)*3] = impro.resize(img_pool[k][y-size:y+size,x-size:x+size], INPUT_SIZE) mask_input = impro.resize(mask,np.min(img_origin.shape[:2]))[y-size:y+size,x-size:x+size] mosaic_input[:,:,-1] = impro.resize(mask_input, INPUT_SIZE) - mosaic_input_tensor = data.im2tensor(mosaic_input,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False) + mosaic_input_tensor = data.im2tensor(mosaic_input,bgr2rgb=False,gpu_id=opt.gpu_id,use_transform = False,is0_1 = False) unmosaic_pred = netG(mosaic_input_tensor) img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False ,is0_1 = False) img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather) diff --git a/cores/options.py b/cores/options.py index 6c972b5..58759f8 100644 --- a/cores/options.py +++ b/cores/options.py @@ -11,7 +11,7 @@ def __init__(self): def initialize(self): #base - self.parser.add_argument('--use_gpu', type=str,default='0', help='if -1, use cpu') + self.parser.add_argument('--gpu_id', type=str,default='0', help='if -1, use cpu') 
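# A hedged usage note: storing --gpu_id as a string lets a comma-separated CUDA
# device list pass straight into CUDA_VISIBLE_DEVICES, e.g.
#   python deepmosaic.py --gpu_id 0     # run on GPU 0
#   python train.py --gpu_id 0,1        # training wraps the net in nn.DataParallel
#   python deepmosaic.py --gpu_id -1    # force CPU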
self.parser.add_argument('--media_path', type=str, default='./imgs/ruoruo.jpg',help='your videos or images path') self.parser.add_argument('-ss', '--start_time', type=str, default='00:00:00',help='start position of video, default is the beginning of video') self.parser.add_argument('-t', '--last_time', type=str, default='00:00:00',help='duration of the video, default is the entire video') @@ -60,13 +60,13 @@ def getparse(self, test_flag = False): self.opt.temp_dir = os.path.join(self.opt.temp_dir, 'DeepMosaics_temp') - if self.opt.use_gpu != '-1': - os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.use_gpu) + if self.opt.gpu_id != '-1': + os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.gpu_id) import torch if not torch.cuda.is_available(): - self.opt.use_gpu = '-1' + self.opt.gpu_id = '-1' # else: - # self.opt.use_gpu = '-1' + # self.opt.gpu_id = '-1' if test_flag: if not os.path.exists(self.opt.media_path): diff --git a/make_datasets/make_pix2pix_dataset.py b/make_datasets/make_pix2pix_dataset.py index 4256f6c..7dcae25 100644 --- a/make_datasets/make_pix2pix_dataset.py +++ b/make_datasets/make_pix2pix_dataset.py @@ -91,7 +91,7 @@ mask = mask_irr if 'network' in opt.mod: mask_net = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0] - if opt.use_gpu != -1: + if opt.gpu_id != '-1': torch.cuda.empty_cache() if not opt.all_mosaic_area: mask_net = impro.find_mostlikely_ROI(mask_net) diff --git a/make_datasets/make_video_dataset.py b/make_datasets/make_video_dataset.py index b9ce6cb..64c7fb7 100644 --- a/make_datasets/make_video_dataset.py +++ b/make_datasets/make_video_dataset.py @@ -56,6 +56,7 @@ ffmpeg.video2image(videopath, opt.temp_dir+'/video2image/%05d.'+opt.tempimage_type,fps=1, start_time = util.second2stamp(cut_point*opt.interval),last_time = util.second2stamp(opt.time)) imagepaths = util.Traversal(opt.temp_dir+'/video2image') + imagepaths = sorted(imagepaths) cnt = 0 for i in range(opt.time): img = impro.imread(imagepaths[i]) @@ -124,5 +125,5 @@ util.writelog(os.path.join(opt.savedir,'opt.txt'), videopath+'\n'+str(result_cnt)+'\n'+str(e)) video_cnt +=1 - if opt.use_gpu != -1: + if opt.gpu_id != '-1': torch.cuda.empty_cache() diff --git a/models/loadmodel.py b/models/loadmodel.py index 974064a..75ca731 100755 --- a/models/loadmodel.py +++ b/models/loadmodel.py @@ -34,7 +34,7 @@ def pix2pix(opt): show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) netG.eval() - if opt.use_gpu != -1: + if opt.gpu_id != '-1': netG.cuda() return netG @@ -60,7 +60,7 @@ def style(opt): __patch_instance_norm_state_dict(state_dict, netG, key.split('.')) netG.load_state_dict(state_dict) - if opt.use_gpu != -1: + if opt.gpu_id != '-1': netG.cuda() return netG @@ -72,7 +72,7 @@ def video(opt): show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) netG.eval() - if opt.use_gpu != -1: + if opt.gpu_id != '-1': netG.cuda() return netG @@ -87,6 +87,6 @@ def bisenet(opt,type='roi'): elif type == 'mosaic': net.load_state_dict(torch.load(opt.mosaic_position_model_path)) net.eval() - if opt.use_gpu != -1: + if opt.gpu_id != '-1': net.cuda() return net diff --git a/models/runmodel.py b/models/runmodel.py index 2bdc88d..bba1fe4 100755 --- a/models/runmodel.py +++ b/models/runmodel.py @@ -7,9 +7,9 @@ import torch import numpy as np -def run_segment(img,net,size = 360,use_gpu = 0): +def run_segment(img,net,size = 360,gpu_id = 0): img = impro.resize(img,size) - img = data.im2tensor(img,use_gpu = use_gpu, bgr2rgb = False,use_transform = False , is0_1 = True) + img =
data.im2tensor(img,gpu_id = gpu_id, bgr2rgb = False,use_transform = False , is0_1 = True) mask = net(img) mask = data.tensor2im(mask, gray=True,rgb2bgr = False, is0_1 = True) return mask @@ -19,7 +19,7 @@ def run_pix2pix(img,net,opt): img = impro.resize(img,512) else: img = impro.resize(img,128) - img = data.im2tensor(img,use_gpu=opt.use_gpu) + img = data.im2tensor(img,gpu_id=opt.gpu_id) img_fake = net(img) img_fake = data.tensor2im(img_fake) return img_fake @@ -53,15 +53,15 @@ def run_styletransfer(opt, net, img): img = cv2.Canny(img,opt.canny-50,opt.canny+50) if opt.only_edges: return img - img = data.im2tensor(img,use_gpu=opt.use_gpu,gray=True,use_transform = False,is0_1 = False) + img = data.im2tensor(img,gpu_id=opt.gpu_id,gray=True,use_transform = False,is0_1 = False) else: - img = data.im2tensor(img,use_gpu=opt.use_gpu,gray=False,use_transform = True) + img = data.im2tensor(img,gpu_id=opt.gpu_id,gray=False,use_transform = True) img = net(img) img = data.tensor2im(img) return img def get_ROI_position(img,net,opt,keepsize=True): - mask = run_segment(img,net,size=360,use_gpu = opt.use_gpu) + mask = run_segment(img,net,size=360,gpu_id = opt.gpu_id) mask = impro.mask_threshold(mask,opt.mask_extend,opt.mask_threshold) if keepsize: mask = impro.resize_like(mask, img) @@ -70,7 +70,7 @@ def get_ROI_position(img,net,opt,keepsize=True): def get_mosaic_position(img_origin,net_mosaic_pos,opt): h,w = img_origin.shape[:2] - mask = run_segment(img_origin,net_mosaic_pos,size=360,use_gpu = opt.use_gpu) + mask = run_segment(img_origin,net_mosaic_pos,size=360,gpu_id = opt.gpu_id) # mask_1 = mask.copy() mask = impro.mask_threshold(mask,ex_mun=int(min(h,w)/20),threshold=opt.mask_threshold) if not opt.all_mosaic_area: diff --git a/train/add/train.py b/train/add/train.py index 8ccc6ee..a2f04ef 100644 --- a/train/add/train.py +++ b/train/add/train.py @@ -54,10 +54,10 @@ util.writelog(os.path.join(dir_checkpoint,'loss.txt'), str(time.asctime(time.localtime(time.time())))+'\n'+util.opt2str(opt)) -def Totensor(img,use_gpu=True): +def Totensor(img,gpu_id=True): size=img.shape[0] img = torch.from_numpy(img).float() - if opt.use_gpu != -1: + if opt.gpu_id != -1: img = img.cuda() return img @@ -71,8 +71,8 @@ def loadimage(imagepaths,maskpaths,opt,test_flag = False): img,mask = data.random_transform_pair_image(img, mask, opt.finesize, test_flag) images[i] = (img.transpose((2, 0, 1))/255.0) masks[i] = (mask.reshape(1,1,opt.finesize,opt.finesize)/255.0) - images = Totensor(images,opt.use_gpu) - masks = Totensor(masks,opt.use_gpu) + images = Totensor(images,opt.gpu_id) + masks = Totensor(masks,opt.gpu_id) return images,masks @@ -111,7 +111,7 @@ def loadimage(imagepaths,maskpaths,opt,test_flag = False): f = open(os.path.join(dir_checkpoint,'epoch_log.txt'),'r') opt.startepoch = int(f.read()) f.close() -if opt.use_gpu != -1: +if opt.gpu_id != -1: net.cuda() cudnn.benchmark = True @@ -135,7 +135,7 @@ def loadimage(imagepaths,maskpaths,opt,test_flag = False): starttime = datetime.datetime.now() util.writelog(os.path.join(dir_checkpoint,'loss.txt'),'Epoch {}/{}.'.format(epoch + 1, opt.maxepoch),True) net.train() - if opt.use_gpu != -1: + if opt.gpu_id != -1: net.cuda() epoch_loss = 0 for i in range(int(img_num*0.8/opt.batchsize)): diff --git a/train/clean/train.py b/train/clean/train.py index a944cd6..c826996 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -75,6 +75,9 @@ def __init__(self, opt, video_dir, test_flag=False): _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % 
(i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) + _ori_img = data.random_transform_single_image(_ori_img,opt.finesize,self.transform_params) + _mosaic_img = data.random_transform_single_image(_mosaic_img,opt.finesize,self.transform_params) + self.ori_load_pool.append(self.normalize(_ori_img)) self.mosaic_load_pool.append(self.normalize(_mosaic_img)) self.ori_load_pool = np.array(self.ori_load_pool) @@ -110,7 +113,9 @@ def next(self): _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) - + _ori_img = data.random_transform_single_image(_ori_img,opt.finesize,self.transform_params) + _mosaic_img = data.random_transform_single_image(_mosaic_img,opt.finesize,self.transform_params) + _ori_img,_mosaic_img = self.normalize(_ori_img),self.normalize(_mosaic_img) self.ori_load_pool [self.opt.S*self.opt.T-1] = _ori_img self.mosaic_load_pool[self.opt.S*self.opt.T-1] = _mosaic_img @@ -184,17 +189,17 @@ def get_data(self): net = BVDNet.BVDNet(opt.N) -if opt.use_gpu != '-1' and len(opt.use_gpu) == 1: +if opt.gpu_id != '-1' and len(opt.gpu_id) == 1: torch.backends.cudnn.benchmark = True net.cuda() -elif opt.use_gpu != '-1' and len(opt.use_gpu) > 1: +elif opt.gpu_id != '-1' and len(opt.gpu_id) > 1: torch.backends.cudnn.benchmark = True net = nn.DataParallel(net) net.cuda() optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) lossf_L1 = nn.L1Loss() -lossf_VGG = BVDNet.VGGLoss([opt.use_gpu]) +lossf_VGG = BVDNet.VGGLoss([opt.gpu_id]) videolist_tmp = os.listdir(opt.dataset) videolist = [] @@ -214,12 +219,12 @@ def get_data(self): t_start = time.time() # train ori_stream,mosaic_stream,previous_frame = dataloader_train.get_data() - ori_stream = data.to_tensor(ori_stream, opt.use_gpu) - mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) + ori_stream = data.to_tensor(ori_stream, opt.gpu_id) + mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) if previous_frame is None: - previous_frame = data.to_tensor(previous_predframe_tmp, opt.use_gpu) + previous_frame = data.to_tensor(previous_predframe_tmp, opt.gpu_id) else: - previous_frame = data.to_tensor(previous_frame, opt.use_gpu) + previous_frame = data.to_tensor(previous_frame, opt.gpu_id) optimizer.zero_grad() out = net(mosaic_stream,previous_frame) loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) @@ -232,7 +237,7 @@ def get_data(self): # save network if train_iter%opt.save_freq == 0 and train_iter != 0: - model_util.save(net, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.use_gpu) + model_util.save(net, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.gpu_id) # psnr if train_iter%opt.psnr_freq ==0: @@ -253,12 +258,12 @@ def get_data(self): # eval if (train_iter)%5 ==0: ori_stream,mosaic_stream,previous_frame = dataloader_eval.get_data() - ori_stream = data.to_tensor(ori_stream, opt.use_gpu) - mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) + ori_stream = 
data.to_tensor(ori_stream, opt.gpu_id) + mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) if previous_frame is None: - previous_frame = data.to_tensor(previous_predframe_tmp, opt.use_gpu) + previous_frame = data.to_tensor(previous_predframe_tmp, opt.gpu_id) else: - previous_frame = data.to_tensor(previous_frame, opt.use_gpu) + previous_frame = data.to_tensor(previous_frame, opt.gpu_id) with torch.no_grad(): out = net(mosaic_stream,previous_frame) loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) @@ -301,8 +306,8 @@ def get_data(self): previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - mosaic_stream = data.to_tensor(mosaic_stream, opt.use_gpu) - previous = data.im2tensor(previous,bgr2rgb = False, use_gpu = opt.use_gpu,use_transform = False, is0_1 = False) + mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) + previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id,use_transform = False, is0_1 = False) with torch.no_grad(): out = net(mosaic_stream,previous) show_imgs+= [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False),data.tensor2im(out,rgb2bgr = False)] diff --git a/train/clean/train_old.py b/train/clean/train_old.py deleted file mode 100644 index 70efb41..0000000 --- a/train/clean/train_old.py +++ /dev/null @@ -1,310 +0,0 @@ -import os -import sys -sys.path.append("..") -sys.path.append("../..") -from cores import Options -opt = Options() - -import numpy as np -import cv2 -import random -import torch -import torch.nn as nn -import time -from multiprocessing import Process, Queue - -from util import mosaic,util,ffmpeg,filt,data -from util import image_processing as impro -from models import pix2pix_model,pix2pixHD_model,video_model,unet_model,loadmodel,videoHD_model -import matplotlib -matplotlib.use('Agg') -from matplotlib import pyplot as plt -import torch.backends.cudnn as cudnn - -''' ---------------------------Get options-------------------------- -''' -opt.parser.add_argument('--N',type=int,default=25, help='') -opt.parser.add_argument('--lr',type=float,default=0.0002, help='') -opt.parser.add_argument('--beta1',type=float,default=0.5, help='') -opt.parser.add_argument('--gan', action='store_true', help='if specified, use gan') -opt.parser.add_argument('--l2', action='store_true', help='if specified, use L2 loss') -opt.parser.add_argument('--hd', action='store_true', help='if specified, use HD model') -opt.parser.add_argument('--lambda_L1',type=float,default=100, help='') -opt.parser.add_argument('--lambda_gan',type=float,default=1, help='') -opt.parser.add_argument('--finesize',type=int,default=256, help='') -opt.parser.add_argument('--loadsize',type=int,default=286, help='') -opt.parser.add_argument('--batchsize',type=int,default=1, help='') -opt.parser.add_argument('--norm',type=str,default='instance', help='') -opt.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use') -opt.parser.add_argument('--n_layers_D', type=int, default=3, help='only used if which_model_netD==n_layers') -opt.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss') -opt.parser.add_argument('--image_pool',type=int,default=8, help='number of image load pool') -opt.parser.add_argument('--load_process',type=int,default=4, help='number of process for 
loading data') - -opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') -opt.parser.add_argument('--maxiter',type=int,default=10000000, help='') -opt.parser.add_argument('--savefreq',type=int,default=10000, help='') -opt.parser.add_argument('--startiter',type=int,default=0, help='') -opt.parser.add_argument('--continue_train', action='store_true', help='') -opt.parser.add_argument('--savename',type=str,default='face', help='') - - -''' ---------------------------Init-------------------------- -''' -opt = opt.getparse() -dir_checkpoint = os.path.join('checkpoints/',opt.savename) -util.makedirs(dir_checkpoint) -util.writelog(os.path.join(dir_checkpoint,'loss.txt'), - str(time.asctime(time.localtime(time.time())))+'\n'+util.opt2str(opt)) -cudnn.benchmark = True - -N = opt.N -loss_sum = [0.,0.,0.,0.,0.,0] -loss_plot = [[],[],[],[]] -item_plot = [] - -# list video dir -videonames = os.listdir(opt.dataset) -videonames.sort() -lengths = [];tmp = [] -print('Check dataset...') -for video in videonames: - if video != 'opt.txt': - video_images = os.listdir(os.path.join(opt.dataset,video,'origin_image')) - lengths.append(len(video_images)) - tmp.append(video) -videonames = tmp -video_num = len(videonames) - -#--------------------------Init network-------------------------- -print('Init network...') -if opt.hd: - netG = videoHD_model.MosaicNet(3*N+1, 3, norm=opt.norm) -else: - netG = video_model.MosaicNet(3*N+1, 3, norm=opt.norm) -netG.cuda() -loadmodel.show_paramsnumber(netG,'netG') - -if opt.gan: - if opt.hd: - netD = pix2pixHD_model.define_D(6, 64, opt.n_layers_D, norm = opt.norm, use_sigmoid=False, num_D=opt.num_D,getIntermFeat=True) - else: - netD = pix2pix_model.define_D(3*2, 64, 'basic', norm = opt.norm) - netD.cuda() - netD.train() - -#--------------------------continue train-------------------------- -if opt.continue_train: - if not os.path.isfile(os.path.join(dir_checkpoint,'last_G.pth')): - opt.continue_train = False - print('can not load last_G, training on init weight.') -if opt.continue_train: - netG.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_G.pth'))) - if opt.gan: - netD.load_state_dict(torch.load(os.path.join(dir_checkpoint,'last_D.pth'))) - f = open(os.path.join(dir_checkpoint,'iter'),'r') - opt.startiter = int(f.read()) - f.close() - -#--------------------------optimizer & loss-------------------------- -optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) -criterion_L1 = nn.L1Loss() -criterion_L2 = nn.MSELoss() -if opt.gan: - optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr,betas=(opt.beta1, 0.999)) - if opt.hd: - criterionGAN = pix2pixHD_model.GANLoss(tensor=torch.cuda.FloatTensor).cuda() - criterionFeat = pix2pixHD_model.GAN_Feat_loss(opt) - criterionVGG = pix2pixHD_model.VGGLoss([opt.use_gpu]) - else: - criterionGAN = pix2pix_model.GANLoss(gan_mode='lsgan').cuda() - -''' ---------------------------preload data & data pool-------------------------- -''' -print('Preloading data, please wait...') -def preload(pool): - cnt = 0 - input_imgs = torch.rand(opt.batchsize,N*3+1,opt.finesize,opt.finesize) - ground_trues = torch.rand(opt.batchsize,3,opt.finesize,opt.finesize) - while 1: - try: - for i in range(opt.batchsize): - video_index = random.randint(0,video_num-1) - videoname = videonames[video_index] - img_index = random.randint(int(N/2)+1,lengths[video_index]- int(N/2)-1) - input_imgs[i],ground_trues[i] = data.load_train_video(videoname,img_index,opt) - cnt += 1 - 
pool.put([input_imgs,ground_trues]) - except Exception as e: - print("Error:",videoname,e) -pool = Queue(opt.image_pool) -for i in range(opt.load_process): - p = Process(target=preload,args=(pool,)) - p.daemon = True - p.start() - -''' ---------------------------train-------------------------- -''' -util.copyfile('./train.py', os.path.join(dir_checkpoint,'train.py')) -util.copyfile('../../models/videoHD_model.py', os.path.join(dir_checkpoint,'model.py')) -netG.train() -time_start=time.time() -print("Begin training...") -for iter in range(opt.startiter+1,opt.maxiter): - - inputdata,target = pool.get() - inputdata,target = inputdata.cuda(),target.cuda() - - if opt.gan: - # compute fake images: G(A) - pred = netG(inputdata) - real_A = inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:] - - # --------------------update D-------------------- - pix2pix_model.set_requires_grad(netD,True) - optimizer_D.zero_grad() - # Fake - fake_AB = torch.cat((real_A, pred), 1) - pred_fake = netD(fake_AB.detach()) - loss_D_fake = criterionGAN(pred_fake, False) - # Real - real_AB = torch.cat((real_A, target), 1) - pred_real = netD(real_AB) - loss_D_real = criterionGAN(pred_real, True) - # combine loss and calculate gradients - loss_D = (loss_D_fake + loss_D_real) * 0.5 - loss_sum[4] += loss_D_fake.item() - loss_sum[5] += loss_D_real.item() - # udpate D's weights - loss_D.backward() - optimizer_D.step() - - # --------------------update G-------------------- - pix2pix_model.set_requires_grad(netD,False) - optimizer_G.zero_grad() - - # First, G(A) should fake the discriminator - fake_AB = torch.cat((real_A, pred), 1) - pred_fake = netD(fake_AB) - loss_G_GAN = criterionGAN(pred_fake, True)*opt.lambda_gan - - # combine loss and calculate gradients - if opt.l2: - loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 - else: - loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 - - if opt.hd: - real_AB = torch.cat((real_A, target), 1) - pred_real = netD(real_AB) - loss_G_GAN_Feat = criterionFeat(pred_fake,pred_real) - loss_VGG = criterionVGG(pred, target) * opt.lambda_feat - loss_G = loss_G_GAN + loss_G_L1 + loss_G_GAN_Feat + loss_VGG - else: - loss_G = loss_G_GAN + loss_G_L1 - loss_sum[0] += loss_G_L1.item() - loss_sum[1] += loss_G_GAN.item() - loss_sum[2] += loss_G_GAN_Feat.item() - loss_sum[3] += loss_VGG.item() - - # udpate G's weights - loss_G.backward() - optimizer_G.step() - - else: - pred = netG(inputdata) - if opt.l2: - loss_G_L1 = (criterion_L1(pred, target)+criterion_L2(pred, target)) * opt.lambda_L1 - else: - loss_G_L1 = criterion_L1(pred, target) * opt.lambda_L1 - loss_sum[0] += loss_G_L1.item() - - optimizer_G.zero_grad() - loss_G_L1.backward() - optimizer_G.step() - - # save train result - if (iter+1)%1000 == 0: - try: - data.showresult(inputdata[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:], - target, pred, os.path.join(dir_checkpoint,'result_train.jpg')) - except Exception as e: - print(e) - - # plot - if (iter+1)%1000 == 0: - time_end = time.time() - #if opt.gan: - savestr ='iter:{0:d} L1_loss:{1:.3f} GAN_loss:{2:.3f} Feat:{3:.3f} VGG:{4:.3f} time:{5:.2f}'.format( - iter+1,loss_sum[0]/1000,loss_sum[1]/1000,loss_sum[2]/1000,loss_sum[3]/1000,(time_end-time_start)/1000) - util.writelog(os.path.join(dir_checkpoint,'loss.txt'), savestr,True) - if (iter+1)/1000 >= 10: - for i in range(4):loss_plot[i].append(loss_sum[i]/1000) - item_plot.append(iter+1) - try: - labels = ['L1_loss','GAN_loss','GAN_Feat_loss','VGG_loss'] - for i in 
range(4):plt.plot(item_plot,loss_plot[i],label=labels[i]) - plt.xlabel('iter') - plt.legend(loc=1) - plt.savefig(os.path.join(dir_checkpoint,'loss.jpg')) - plt.close() - except Exception as e: - print("error:",e) - - loss_sum = [0.,0.,0.,0.,0.,0.] - time_start=time.time() - - # save network - if (iter+1)%(opt.savefreq//10) == 0: - torch.save(netG.cpu().state_dict(),os.path.join(dir_checkpoint,'last_G.pth')) - if opt.gan: - torch.save(netD.cpu().state_dict(),os.path.join(dir_checkpoint,'last_D.pth')) - if opt.use_gpu !=-1 : - netG.cuda() - if opt.gan: - netD.cuda() - f = open(os.path.join(dir_checkpoint,'iter'),'w+') - f.write(str(iter+1)) - f.close() - - if (iter+1)%opt.savefreq == 0: - os.rename(os.path.join(dir_checkpoint,'last_G.pth'),os.path.join(dir_checkpoint,str(iter+1)+'G.pth')) - if opt.gan: - os.rename(os.path.join(dir_checkpoint,'last_D.pth'),os.path.join(dir_checkpoint,str(iter+1)+'D.pth')) - print('network saved.') - - #test - if (iter+1)%opt.savefreq == 0: - if os.path.isdir('./test'): - netG.eval() - - test_names = os.listdir('./test') - test_names.sort() - result = np.zeros((opt.finesize*2,opt.finesize*len(test_names),3), dtype='uint8') - - for cnt,test_name in enumerate(test_names,0): - img_names = os.listdir(os.path.join('./test',test_name,'image')) - img_names.sort() - inputdata = np.zeros((opt.finesize,opt.finesize,3*N+1), dtype='uint8') - for i in range(0,N): - img = impro.imread(os.path.join('./test',test_name,'image',img_names[i])) - img = impro.resize(img,opt.finesize) - inputdata[:,:,i*3:(i+1)*3] = img - - mask = impro.imread(os.path.join('./test',test_name,'mask.png'),'gray') - mask = impro.resize(mask,opt.finesize) - mask = impro.mask_threshold(mask,15,128) - inputdata[:,:,-1] = mask - result[0:opt.finesize,opt.finesize*cnt:opt.finesize*(cnt+1),:] = inputdata[:,:,int((N-1)/2)*3:(int((N-1)/2)+1)*3] - inputdata = data.im2tensor(inputdata,bgr2rgb=False,use_gpu=opt.use_gpu,use_transform = False,is0_1 = False) - pred = netG(inputdata) - - pred = data.tensor2im(pred,rgb2bgr = False, is0_1 = False) - result[opt.finesize:opt.finesize*2,opt.finesize*cnt:opt.finesize*(cnt+1),:] = pred - - cv2.imwrite(os.path.join(dir_checkpoint,str(iter+1)+'_test.jpg'), result) - netG.train() \ No newline at end of file diff --git a/util/data.py b/util/data.py index 8884047..83a14cd 100755 --- a/util/data.py +++ b/util/data.py @@ -42,7 +42,7 @@ def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = return image_numpy.astype(imtype) -def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = True, use_gpu = 0, use_transform = True,is0_1 = True): +def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = True, gpu_id = 0, use_transform = True,is0_1 = True): if gray: h, w = image_numpy.shape @@ -65,7 +65,7 @@ def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = image_tensor = torch.from_numpy(image_numpy).float() if reshape: image_tensor = image_tensor.reshape(1,ch,h,w) - if use_gpu != '-1': + if gpu_id != '-1': image_tensor = image_tensor.cuda() return image_tensor @@ -281,8 +281,8 @@ def random_transform_pair_image(img,mask,finesize,test_flag = False): # input_img[:,:,i*3:(i+1)*3] = img_mosaic # # to tensor # input_img,ground_true = random_transform_video(input_img,ground_true,opt.finesize,N) -# input_img = im2tensor(input_img,bgr2rgb=False,use_gpu=-1,use_transform = False,is0_1=False) -# ground_true = im2tensor(ground_true,bgr2rgb=False,use_gpu=-1,use_transform = False,is0_1=False) +# 
input_img = im2tensor(input_img,bgr2rgb=False,gpu_id=-1,use_transform = False,is0_1=False) # ground_true = im2tensor(ground_true,bgr2rgb=False,gpu_id=-1,use_transform = False,is0_1=False) # return input_img,ground_true diff --git a/util/ffmpeg.py b/util/ffmpeg.py index 4eb668b..2088142 100755 --- a/util/ffmpeg.py +++ b/util/ffmpeg.py @@ -1,5 +1,5 @@ import os,json - +import subprocess # ffmpeg 3.4.6 def args2cmd(args): @@ -32,10 +32,11 @@ def run(args,mode = 0): return sout def video2image(videopath, imagepath, fps=0, start_time='00:00:00', last_time='00:00:00'): - args = ['ffmpeg', '-i', '"'+videopath+'"'] + args = ['ffmpeg'] if last_time != '00:00:00': args += ['-ss', start_time] args += ['-t', last_time] + args += ['-i', '"'+videopath+'"'] if fps != 0: args += ['-r', str(fps)] args += ['-f', 'image2','-q:v','-0',imagepath] From 48c032b16e0804a67cae6eadcc822b0fdb39f3ae Mon Sep 17 00:00:00 2001 From: hypox64 Date: Thu, 22 Apr 2021 16:07:02 +0800 Subject: [PATCH 5/9] Ready to add GAN --- models/BVDNet.py | 189 +++++++++++++++++++++-------------- models/model_util.py | 170 ++++++++++++++++++++++++++++++- train/clean/train.py | 210 +++++++++------------------------------ util/data.py | 104 ++----------------- util/dataloader.py | 134 +++++++++++++++++++++++++ util/degradater.py | 119 ++++++++++++++++++++++ util/image_processing.py | 62 +++--------- 7 files changed, 600 insertions(+), 388 deletions(-) create mode 100644 util/dataloader.py create mode 100644 util/degradater.py diff --git a/models/BVDNet.py b/models/BVDNet.py index 7734e86..ada2ee0 100644 --- a/models/BVDNet.py +++ b/models/BVDNet.py @@ -1,8 +1,8 @@ import torch import torch.nn as nn -import torch.nn.functional as F from .pix2pixHD_model import * from .model_util import * +from models import model_util class UpBlock(nn.Module): def __init__(self, in_channel, out_channel, kernel_size=3, padding=1): @@ -17,14 +17,10 @@ def __init__(self, in_channel, out_channel, kernel_size=3, padding=1): # Blur(out_channel), ) - def forward(self, input): - outup = self.convup(input) - return outup - class Encoder2d(nn.Module): def __init__(self, input_nc, ngf=64, n_downsampling=3, activation = nn.LeakyReLU(0.2)): super(Encoder2d, self).__init__() @@ -52,21 +48,19 @@ def __init__(self, input_nc, ngf=64, n_downsampling=3, activation = nn.LeakyReLU mult = 2**i model += [ SpectralNorm(nn.Conv3d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1)), activation] - self.model = nn.Sequential(*model) def forward(self, input): return self.model(input) class BVDNet(nn.Module): - def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3,activation=nn.LeakyReLU(0.2)): + def __init__(self, N=2, n_downsampling=3, n_blocks=4, input_nc=3, output_nc=3,activation=nn.LeakyReLU(0.2)): super(BVDNet, self).__init__() - ngf = 64 padding_type = 'reflect' self.N = N - # encoder + ### encoder self.encoder3d = Encoder3d(input_nc,64,n_downsampling,activation) self.encoder2d = Encoder2d(input_nc,64,n_downsampling,activation) @@ -82,13 +76,6 @@ def __init__(self, N, n_downsampling=3, n_blocks=1, input_nc=3, output_nc=3,acti for i in range(n_downsampling): mult = 2**(n_downsampling - i) self.decoder += [UpBlock(ngf * mult, int(ngf * mult / 2))] - # self.decoder += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), - # norm_layer(int(ngf * mult / 2)), activation] - # self.decoder += [ nn.Upsample(scale_factor = 2, mode='nearest'), - # nn.ReflectionPad2d(1), - # nn.Conv2d(ngf * mult, int(ngf
* mult / 2),kernel_size=3, stride=1, padding=0), - # norm_layer(int(ngf * mult / 2)), - # activation] self.decoder += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] self.decoder = nn.Sequential(*self.decoder) self.limiter = nn.Tanh() @@ -97,68 +84,124 @@ def forward(self, stream, previous): this_shortcut = stream[:,:,self.N] stream = self.encoder3d(stream) stream = stream.reshape(stream.size(0),stream.size(1),stream.size(3),stream.size(4)) - # print(stream.shape) previous = self.encoder2d(previous) x = stream + previous x = self.blocks(x) x = self.decoder(x) x = x+this_shortcut x = self.limiter(x) - #print(x.shape) - - # print(stream.shape,previous.shape) return x -class VGGLoss(nn.Module): - def __init__(self, gpu_ids): - super(VGGLoss, self).__init__() - - self.vgg = Vgg19() - if gpu_ids != '-1' and len(gpu_ids) == 1: - self.vgg.cuda() - elif gpu_ids != '-1' and len(gpu_ids) > 1: - self.vgg = nn.DataParallel(self.vgg) - self.vgg.cuda() - - self.criterion = nn.L1Loss() - self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] - - def forward(self, x, y): - x_vgg, y_vgg = self.vgg(x), self.vgg(y) - loss = 0 - for i in range(len(x_vgg)): - loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) - return loss - -from torchvision import models -class Vgg19(torch.nn.Module): - def __init__(self, requires_grad=False): - super(Vgg19, self).__init__() - vgg_pretrained_features = models.vgg19(pretrained=True).features - self.slice1 = torch.nn.Sequential() - self.slice2 = torch.nn.Sequential() - self.slice3 = torch.nn.Sequential() - self.slice4 = torch.nn.Sequential() - self.slice5 = torch.nn.Sequential() - for x in range(2): - self.slice1.add_module(str(x), vgg_pretrained_features[x]) - for x in range(2, 7): - self.slice2.add_module(str(x), vgg_pretrained_features[x]) - for x in range(7, 12): - self.slice3.add_module(str(x), vgg_pretrained_features[x]) - for x in range(12, 21): - self.slice4.add_module(str(x), vgg_pretrained_features[x]) - for x in range(21, 30): - self.slice5.add_module(str(x), vgg_pretrained_features[x]) - if not requires_grad: - for param in self.parameters(): - param.requires_grad = False - - def forward(self, X): - h_relu1 = self.slice1(X) - h_relu2 = self.slice2(h_relu1) - h_relu3 = self.slice3(h_relu2) - h_relu4 = self.slice4(h_relu3) - h_relu5 = self.slice5(h_relu4) - out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] - return out +def define_G(N=2, n_blocks=1, gpu_id='-1'): + netG = BVDNet(N = N, n_blocks=n_blocks) + if gpu_id != '-1' and len(gpu_id) == 1: + netG.cuda() + elif gpu_id != '-1' and len(gpu_id) > 1: + netG = nn.DataParallel(netG) + netG.cuda() + # netG.apply(model_util.init_weights) + return netG + +################################Discriminator################################ +def define_D(input_nc, ndf, n_layers_D, use_sigmoid=False, num_D=1, gpu_id='-1'): + netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid, num_D) + if gpu_id != '-1' and len(gpu_id) == 1: + netD.cuda() + elif gpu_id != '-1' and len(gpu_id) > 1: + netD = nn.DataParallel(netD) + netD.cuda() + netD.apply(model_util.init_weights) + return netD + +class MultiscaleDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False, num_D=3): + super(MultiscaleDiscriminator, self).__init__() + self.num_D = num_D + self.n_layers = n_layers + + for i in range(num_D): + netD = NLayerDiscriminator(input_nc, ndf, n_layers, use_sigmoid) + setattr(self, 'layer'+str(i), netD.model) + self.downsample = nn.AvgPool2d(3, 
stride=2, padding=[1, 1], count_include_pad=False) + + def singleD_forward(self, model, input): + return [model(input)] + + def forward(self, input): + num_D = self.num_D + result = [] + input_downsampled = input + for i in range(num_D): + model = getattr(self, 'layer'+str(num_D-1-i)) + result.append(self.singleD_forward(model, input_downsampled)) + if i != (num_D-1): + input_downsampled = self.downsample(input_downsampled) + return result + +# Defines the PatchGAN discriminator with the specified arguments. +class NLayerDiscriminator(nn.Module): + def __init__(self, input_nc, ndf=64, n_layers=3, use_sigmoid=False): + super(NLayerDiscriminator, self).__init__() + self.n_layers = n_layers + + kw = 4 + padw = int(np.ceil((kw-1.0)/2)) + sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2)]] + + nf = ndf + for n in range(1, n_layers): + nf_prev = nf + nf = min(nf * 2, 512) + sequence += [[ + SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw)), + nn.LeakyReLU(0.2) + ]] + + nf_prev = nf + nf = min(nf * 2, 512) + sequence += [[ + SpectralNorm(nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw)), + nn.LeakyReLU(0.2) + ]] + + sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]] + + if use_sigmoid: + sequence += [[nn.Sigmoid()]] + + sequence_stream = [] + for n in range(len(sequence)): + sequence_stream += sequence[n] + self.model = nn.Sequential(*sequence_stream) + + def forward(self, input): + return self.model(input) + +class GANLoss(nn.Module): + def __init__(self, mode='D'): + super(GANLoss, self).__init__() + if mode == 'D': + self.lossf = model_util.HingeLossD() + elif mode == 'G': + self.lossf = model_util.HingeLossG() + self.mode = mode + + def forward(self, dis_fake = None, dis_real = None): + if isinstance(dis_fake, list): + weight = 2**len(dis_fake) + if self.mode == 'D': + loss = 0 + for i in range(len(dis_fake)): + weight = weight/2 + loss += weight*self.lossf(dis_fake[i],dis_real[i]) + elif self.mode =='G': + loss = 0 + for i in range(len(dis_fake)): + weight = weight/2 + loss += weight*self.lossf(dis_fake[i]) + return loss + else: + if self.mode == 'D': + return self.lossf(dis_fake,dis_real) + elif self.mode =='G': + return self.lossf(dis_fake) diff --git a/models/model_util.py b/models/model_util.py index f2905c7..6c9d89e 100644 --- a/models/model_util.py +++ b/models/model_util.py @@ -1,9 +1,13 @@ +import functools +from math import exp + import torch import torch.nn as nn from torch.nn import init +from torch.autograd import Variable +import torch.nn.functional as F import torch.nn.utils.spectral_norm as SpectralNorm -import functools - +from torchvision import models def save(net,path,gpu_id): if isinstance(net, nn.DataParallel): @@ -13,6 +17,7 @@ def save(net,path,gpu_id): if gpu_id != '-1': net.cuda() +################################## initialization ################################## def get_norm_layer(norm_type='instance',mod = '2d'): if norm_type == 'batch': if mod == '2d': @@ -51,9 +56,10 @@ def init_func(m): init.normal_(m.weight.data, 1.0, gain) init.constant_(m.bias.data, 0.0) - print('initialize network with %s' % init_type) + # print('initialize network with %s' % init_type) net.apply(init_func) +################################## Network structure ################################## class ResnetBlockSpectralNorm(nn.Module): def __init__(self, dim, padding_type, activation=nn.LeakyReLU(0.2), use_dropout=False): super(ResnetBlockSpectralNorm, self).__init__() @@ -91,4 
+97,160 @@ def build_conv_block(self, dim, padding_type, activation, use_dropout): def forward(self, x): out = x + self.conv_block(x) - return out \ No newline at end of file + return out + +################################## Loss function ################################## +class HingeLossD(nn.Module): + def __init__(self): + super(HingeLossD, self).__init__() + + def forward(self, dis_fake, dis_real): + loss_real = torch.mean(F.relu(1. - dis_real)) + loss_fake = torch.mean(F.relu(1. + dis_fake)) + return loss_real + loss_fake + +class HingeLossG(nn.Module): + def __init__(self): + super(HingeLossG, self).__init__() + + def forward(self, dis_fake): + loss_fake = -torch.mean(dis_fake) + return loss_fake + +class VGGLoss(nn.Module): + def __init__(self, gpu_id): + super(VGGLoss, self).__init__() + + self.vgg = Vgg19() + if gpu_id != '-1' and len(gpu_id) == 1: + self.vgg.cuda() + elif gpu_id != '-1' and len(gpu_id) > 1: + self.vgg = nn.DataParallel(self.vgg) + self.vgg.cuda() + + self.criterion = nn.L1Loss() + self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] + + def forward(self, x, y): + x_vgg, y_vgg = self.vgg(x), self.vgg(y) + loss = 0 + for i in range(len(x_vgg)): + loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach()) + return loss + +class Vgg19(torch.nn.Module): + def __init__(self, requires_grad=False): + super(Vgg19, self).__init__() + vgg_pretrained_features = models.vgg19(pretrained=True).features + self.slice1 = torch.nn.Sequential() + self.slice2 = torch.nn.Sequential() + self.slice3 = torch.nn.Sequential() + self.slice4 = torch.nn.Sequential() + self.slice5 = torch.nn.Sequential() + for x in range(2): + self.slice1.add_module(str(x), vgg_pretrained_features[x]) + for x in range(2, 7): + self.slice2.add_module(str(x), vgg_pretrained_features[x]) + for x in range(7, 12): + self.slice3.add_module(str(x), vgg_pretrained_features[x]) + for x in range(12, 21): + self.slice4.add_module(str(x), vgg_pretrained_features[x]) + for x in range(21, 30): + self.slice5.add_module(str(x), vgg_pretrained_features[x]) + if not requires_grad: + for param in self.parameters(): + param.requires_grad = False + + def forward(self, X): + h_relu1 = self.slice1(X) + h_relu2 = self.slice2(h_relu1) + h_relu3 = self.slice3(h_relu2) + h_relu4 = self.slice4(h_relu3) + h_relu5 = self.slice5(h_relu4) + out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5] + return out + +################################## Evaluation ################################## +'''https://github.com/Po-Hsun-Su/pytorch-ssim + +img1 = Variable(torch.rand(1, 1, 256, 256)) +img2 = Variable(torch.rand(1, 1, 256, 256)) + +if torch.cuda.is_available(): + img1 = img1.cuda() + img2 = img2.cuda() + +print(pytorch_ssim.ssim(img1, img2)) + +ssim_loss = pytorch_ssim.SSIM(window_size = 11) + +print(ssim_loss(img1, img2)) +''' + +def gaussian(window_size, sigma): + gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)]) + return gauss/gauss.sum() + +def create_window(window_size, channel): + _1D_window = gaussian(window_size, 1.5).unsqueeze(1) + _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0) + window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous()) + return window + +def _ssim(img1, img2, window, window_size, channel, size_average = True): + mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel) + mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel) + + mu1_sq = mu1.pow(2) + mu2_sq = mu2.pow(2) 
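# For reference, the statistics computed in this function feed the standard
# SSIM definition
#   SSIM(x,y) = ((2*mu_x*mu_y + C1) * (2*sigma_xy + C2))
#               / ((mu_x^2 + mu_y^2 + C1) * (sigma_x^2 + sigma_y^2 + C2))
# with C1 = (0.01*L)^2 and C2 = (0.03*L)^2 for dynamic range L (L = 1 below,
# i.e. inputs are expected to be scaled to [0,1]).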
+ mu1_mu2 = mu1*mu2 + + sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq + sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq + sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2 + + C1 = 0.01**2 + C2 = 0.03**2 + + ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2)) + + if size_average: + return ssim_map.mean() + else: + return ssim_map.mean(1).mean(1).mean(1) + +class SSIM(torch.nn.Module): + def __init__(self, window_size = 11, size_average = True): + super(SSIM, self).__init__() + self.window_size = window_size + self.size_average = size_average + self.channel = 1 + self.window = create_window(window_size, self.channel) + + def forward(self, img1, img2): + (_, channel, _, _) = img1.size() + + if channel == self.channel and self.window.data.type() == img1.data.type(): + window = self.window + else: + window = create_window(self.window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + self.window = window + self.channel = channel + + + return _ssim(img1, img2, window, self.window_size, channel, self.size_average) + +def ssim(img1, img2, window_size = 11, size_average = True): + (_, channel, _, _) = img1.size() + window = create_window(window_size, channel) + + if img1.is_cuda: + window = window.cuda(img1.get_device()) + window = window.type_as(img1) + + return _ssim(img1, img2, window, window_size, channel, size_average) diff --git a/train/clean/train.py b/train/clean/train.py index c826996..38be2c7 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -11,11 +11,10 @@ import torch import torch.nn as nn import time -from multiprocessing import Process, Queue -from util import mosaic,util,ffmpeg,filt,data +from util import mosaic,util,ffmpeg,filt,data,dataloader from util import image_processing as impro -from models import pix2pix_model,pix2pixHD_model,video_model,unet_model,loadmodel,videoHD_model,BVDNet,model_util +from models import BVDNet,model_util import torch.backends.cudnn as cudnn from tensorboardX import SummaryWriter @@ -26,13 +25,15 @@ opt.parser.add_argument('--S',type=int,default=3, help='Stride of 3 frames') # opt.parser.add_argument('--T',type=int,default=7, help='T = 2N+1') opt.parser.add_argument('--M',type=int,default=100, help='How many frames read from each videos') -opt.parser.add_argument('--lr',type=float,default=0.001, help='') +opt.parser.add_argument('--lr',type=float,default=0.0002, help='') opt.parser.add_argument('--beta1',type=float,default=0.9, help='') opt.parser.add_argument('--beta2',type=float,default=0.999, help='') opt.parser.add_argument('--finesize',type=int,default=256, help='') opt.parser.add_argument('--loadsize',type=int,default=286, help='') opt.parser.add_argument('--batchsize',type=int,default=1, help='') -opt.parser.add_argument('--lambda_VGG',type=float,default=0.1, help='') +opt.parser.add_argument('--lambda_L2',type=float,default=100, help='') +opt.parser.add_argument('--lambda_VGG',type=float,default=1, help='') +opt.parser.add_argument('--lambda_GAN',type=float,default=1, help='') opt.parser.add_argument('--load_thread',type=int,default=4, help='number of thread for loading data') opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') @@ -45,134 +46,6 @@ opt.parser.add_argument('--showresult_num',type=int,default=4, help='') 
opt.parser.add_argument('--psnr_freq',type=int,default=100, help='') -class TrainVideoLoader(object): - """docstring for VideoLoader - Load a single video(Converted to images) - How to use: - 1.Init TrainVideoLoader as loader - 2.Get data by loader.ori_stream - 3.loader.next() to get next stream - """ - def __init__(self, opt, video_dir, test_flag=False): - super(TrainVideoLoader, self).__init__() - self.opt = opt - self.test_flag = test_flag - self.video_dir = video_dir - self.t = 0 - self.n_iter = self.opt.M -self.opt.S*(self.opt.T+1) - self.transform_params = data.get_transform_params() - self.ori_load_pool = [] - self.mosaic_load_pool = [] - self.previous_pred = None - feg_ori = impro.imread(os.path.join(video_dir,'origin_image','00001.jpg'),loadsize=self.opt.loadsize,rgb=True) - feg_mask = impro.imread(os.path.join(video_dir,'mask','00001.png'),mod='gray',loadsize=self.opt.loadsize) - self.mosaic_size,self.mod,self.rect_rat,self.feather = mosaic.get_random_parameter(feg_ori,feg_mask) - self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] - - #Init load pool - for i in range(self.opt.S*self.opt.T): - #print(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg')) - _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) - _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) - _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) - _ori_img = data.random_transform_single_image(_ori_img,opt.finesize,self.transform_params) - _mosaic_img = data.random_transform_single_image(_mosaic_img,opt.finesize,self.transform_params) - - self.ori_load_pool.append(self.normalize(_ori_img)) - self.mosaic_load_pool.append(self.normalize(_mosaic_img)) - self.ori_load_pool = np.array(self.ori_load_pool) - self.mosaic_load_pool = np.array(self.mosaic_load_pool) - - #Init frist stream - self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() - self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() - # stream B,T,H,W,C -> B,C,T,H,W - self.ori_stream = self.ori_stream.reshape (1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - - #Init frist previous frame - self.previous_pred = self.ori_load_pool[self.opt.S*self.opt.N-1].copy() - # previous B,C,H,W - self.previous_pred = self.previous_pred.reshape(1,opt.finesize,opt.finesize,3).transpose((0,3,1,2)) - - def normalize(self,data): - ''' - normalize to -1 ~ 1 - ''' - return (data.astype(np.float32)/255.0-0.5)/0.5 - - def anti_normalize(self,data): - return np.clip((data*0.5+0.5)*255,0,255).astype(np.uint8) - - def next(self): - if self.t != 0: - self.previous_pred = None - self.ori_load_pool [:self.opt.S*self.opt.T-1] = self.ori_load_pool [1:self.opt.S*self.opt.T] - self.mosaic_load_pool[:self.opt.S*self.opt.T-1] = self.mosaic_load_pool[1:self.opt.S*self.opt.T] - #print(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg')) - _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) - _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % 
(self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) - _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) - _ori_img = data.random_transform_single_image(_ori_img,opt.finesize,self.transform_params) - _mosaic_img = data.random_transform_single_image(_mosaic_img,opt.finesize,self.transform_params) - - _ori_img,_mosaic_img = self.normalize(_ori_img),self.normalize(_mosaic_img) - self.ori_load_pool [self.opt.S*self.opt.T-1] = _ori_img - self.mosaic_load_pool[self.opt.S*self.opt.T-1] = _mosaic_img - - self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() - self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() - - # stream B,T,H,W,C -> B,C,T,H,W - self.ori_stream = self.ori_stream.reshape (1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - - self.t += 1 - -class DataLoader(object): - """DataLoader""" - def __init__(self, opt, videolist, test_flag=False): - super(DataLoader, self).__init__() - self.videolist = [] - self.opt = opt - self.test_flag = test_flag - for i in range(self.opt.n_epoch): - self.videolist += videolist - random.shuffle(self.videolist) - self.each_video_n_iter = self.opt.M -self.opt.S*(self.opt.T+1) - self.n_iter = len(self.videolist)//self.opt.load_thread//self.opt.batchsize*self.each_video_n_iter*self.opt.load_thread - self.queue = Queue(self.opt.load_thread) - self.ori_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W - self.mosaic_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W - self.previous_pred = np.zeros((self.opt.batchsize,3,self.opt.finesize,self.opt.finesize),dtype=np.float32) - self.load_init() - - def load(self,videolist): - for load_video_iter in range(len(videolist)//self.opt.batchsize): - iter_videolist = videolist[load_video_iter*self.opt.batchsize:(load_video_iter+1)*self.opt.batchsize] - videoloaders = [TrainVideoLoader(self.opt,os.path.join(self.opt.dataset,iter_videolist[i]),self.test_flag) for i in range(self.opt.batchsize)] - for each_video_iter in range(self.each_video_n_iter): - for i in range(self.opt.batchsize): - self.ori_stream[i] = videoloaders[i].ori_stream - self.mosaic_stream[i] = videoloaders[i].mosaic_stream - if each_video_iter == 0: - self.previous_pred[i] = videoloaders[i].previous_pred - videoloaders[i].next() - if each_video_iter == 0: - self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),self.previous_pred]) - else: - self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),None]) - - def load_init(self): - ptvn = len(self.videolist)//self.opt.load_thread #pre_thread_video_num - for i in range(self.opt.load_thread): - p = Process(target=self.load,args=(self.videolist[i*ptvn:(i+1)*ptvn],)) - p.daemon = True - p.start() - - def get_data(self): - return self.queue.get() - ''' --------------------------Init-------------------------- ''' @@ -186,21 +59,21 @@ def get_data(self): localtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) tensorboard_savedir = os.path.join('checkpoints/tensorboard',localtime+'_'+opt.savename) TBGlobalWriter = SummaryWriter(tensorboard_savedir) -net = BVDNet.BVDNet(opt.N) - +''' 
+--------------------------Init Network-------------------------- +''' if opt.gpu_id != '-1' and len(opt.gpu_id) == 1: torch.backends.cudnn.benchmark = True - net.cuda() -elif opt.gpu_id != '-1' and len(opt.gpu_id) > 1: - torch.backends.cudnn.benchmark = True - net = nn.DataParallel(net) - net.cuda() +netG = BVDNet.define_G(opt.N,gpu_id=opt.gpu_id) -optimizer = torch.optim.Adam(net.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) -lossf_L1 = nn.L1Loss() -lossf_VGG = BVDNet.VGGLoss([opt.gpu_id]) +optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) +lossf_L2 = nn.MSELoss() +lossf_VGG = model_util.VGGLoss(opt.gpu_id) +''' +--------------------------Init DataLoader-------------------------- +''' videolist_tmp = os.listdir(opt.dataset) videolist = [] for video in videolist_tmp: @@ -211,33 +84,36 @@ def get_data(self): videolist_train = videolist[:int(len(videolist)*0.8)].copy() videolist_eval = videolist[int(len(videolist)*0.8):].copy() -dataloader_train = DataLoader(opt, videolist_train) -dataloader_eval = DataLoader(opt, videolist_eval) +Videodataloader_train = dataloader.VideoDataLoader(opt, videolist_train) +Videodataloader_eval = dataloader.VideoDataLoader(opt, videolist_eval) +''' +--------------------------Train-------------------------- +''' previous_predframe_tmp = 0 -for train_iter in range(dataloader_train.n_iter): +for train_iter in range(Videodataloader_train.n_iter): t_start = time.time() # train - ori_stream,mosaic_stream,previous_frame = dataloader_train.get_data() + ori_stream,mosaic_stream,previous_frame = Videodataloader_train.get_data() ori_stream = data.to_tensor(ori_stream, opt.gpu_id) mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) if previous_frame is None: previous_frame = data.to_tensor(previous_predframe_tmp, opt.gpu_id) else: previous_frame = data.to_tensor(previous_frame, opt.gpu_id) - optimizer.zero_grad() - out = net(mosaic_stream,previous_frame) - loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) + optimizer_G.zero_grad() + out = netG(mosaic_stream,previous_frame) + loss_L2 = lossf_L2(out,ori_stream[:,:,opt.N]) * opt.lambda_L2 loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG - TBGlobalWriter.add_scalars('loss/train', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) - loss = loss_L1+loss_VGG + TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) + loss = loss_L2+loss_VGG loss.backward() - optimizer.step() + optimizer_G.step() previous_predframe_tmp = out.detach().cpu().numpy() # save network if train_iter%opt.save_freq == 0 and train_iter != 0: - model_util.save(net, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.gpu_id) + model_util.save(netG, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.gpu_id) # psnr if train_iter%opt.psnr_freq ==0: @@ -254,10 +130,12 @@ def get_data(self): data.tensor2im(ori_stream[:,:,opt.N],rgb2bgr = False,batch_index=i)] show_img = impro.splice(show_imgs, (opt.showresult_num,3)) TBGlobalWriter.add_image('train', show_img,train_iter,dataformats='HWC') - - # eval + + ''' + --------------------------Eval-------------------------- + ''' if (train_iter)%5 ==0: - ori_stream,mosaic_stream,previous_frame = dataloader_eval.get_data() + ori_stream,mosaic_stream,previous_frame = Videodataloader_eval.get_data() ori_stream = data.to_tensor(ori_stream, opt.gpu_id) mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) if previous_frame is None: @@ -265,10 +143,10 @@ def get_data(self): 
else: previous_frame = data.to_tensor(previous_frame, opt.gpu_id) with torch.no_grad(): - out = net(mosaic_stream,previous_frame) - loss_L1 = lossf_L1(out,ori_stream[:,:,opt.N]) + out = netG(mosaic_stream,previous_frame) + loss_L2 = lossf_L2(out,ori_stream[:,:,opt.N]) loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG - TBGlobalWriter.add_scalars('loss/eval', {'L1':loss_L1.item(),'VGG':loss_VGG.item()}, train_iter) + TBGlobalWriter.add_scalars('loss/eval', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) previous_predframe_tmp = out.detach().cpu().numpy() #psnr @@ -277,7 +155,7 @@ def get_data(self): for i in range(len(out)): psnr += impro.psnr(data.tensor2im(out,batch_index=i), data.tensor2im(ori_stream[:,:,opt.N],batch_index=i)) TBGlobalWriter.add_scalars('psnr', {'eval':psnr/len(out)}, train_iter) - + #show if train_iter % opt.showresult_freq == 0: show_imgs = [] for i in range(opt.showresult_num): @@ -287,11 +165,13 @@ def get_data(self): show_img = impro.splice(show_imgs, (opt.showresult_num,3)) TBGlobalWriter.add_image('eval', show_img,train_iter,dataformats='HWC') t_end = time.time() - print('iter:{0:d} t:{1:.2f} l1:{2:.4f} vgg:{3:.4f} psnr:{4:.2f}'.format(train_iter,t_end-t_start, - loss_L1.item(),loss_VGG.item(),psnr/len(out)) ) + print('iter:{0:d} t:{1:.2f} L2:{2:.4f} vgg:{3:.4f} psnr:{4:.2f}'.format(train_iter,t_end-t_start, + loss_L2.item(),loss_VGG.item(),psnr/len(out)) ) t_strat = time.time() - # test + ''' + --------------------------Test-------------------------- + ''' if train_iter % opt.showresult_freq == 0 and os.path.isdir(opt.dataset_test): show_imgs = [] videos = os.listdir(opt.dataset_test) @@ -309,7 +189,7 @@ def get_data(self): mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id,use_transform = False, is0_1 = False) with torch.no_grad(): - out = net(mosaic_stream,previous) + out = netG(mosaic_stream,previous) show_imgs+= [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False),data.tensor2im(out,rgb2bgr = False)] show_img = impro.splice(show_imgs, (len(videos),2)) diff --git a/util/data.py b/util/data.py index 83a14cd..888d5c7 100755 --- a/util/data.py +++ b/util/data.py @@ -5,7 +5,7 @@ import torchvision.transforms as transforms import cv2 from . import image_processing as impro -from . import mosaic +from . 
import degradater transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) @@ -75,51 +75,6 @@ def shuffledata(data,target): np.random.set_state(state) np.random.shuffle(target) -# def random_transform_video(src,target,finesize,N): -# #random blur -# if random.random()<0.2: -# h,w = src.shape[:2] -# src = src[:8*(h//8),:8*(w//8)] -# Q_ran = random.randint(1,15) -# src[:,:,:3*N] = impro.dctblur(src[:,:,:3*N],Q_ran) -# target = impro.dctblur(target,Q_ran) - -# #random crop -# h,w = target.shape[:2] -# h_move = int((h-finesize)*random.random()) -# w_move = int((w-finesize)*random.random()) -# target = target[h_move:h_move+finesize,w_move:w_move+finesize,:] -# src = src[h_move:h_move+finesize,w_move:w_move+finesize,:] - -# #random flip -# if random.random()<0.5: -# src = src[:,::-1,:] -# target = target[:,::-1,:] - -# #random color -# alpha = random.uniform(-0.1,0.1) -# beta = random.uniform(-0.1,0.1) -# b = random.uniform(-0.05,0.05) -# g = random.uniform(-0.05,0.05) -# r = random.uniform(-0.05,0.05) -# for i in range(N): -# src[:,:,i*3:(i+1)*3] = impro.color_adjust(src[:,:,i*3:(i+1)*3],alpha,beta,b,g,r) -# target = impro.color_adjust(target,alpha,beta,b,g,r) - -# #random resize blur -# if random.random()<0.5: -# interpolations = [cv2.INTER_LINEAR,cv2.INTER_CUBIC,cv2.INTER_LANCZOS4] -# size_ran = random.uniform(0.7,1.5) -# interpolation_up = interpolations[random.randint(0,2)] -# interpolation_down =interpolations[random.randint(0,2)] - -# tmp = cv2.resize(src[:,:,:3*N], (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) -# src[:,:,:3*N] = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) - -# tmp = cv2.resize(target, (int(finesize*size_ran),int(finesize*size_ran)),interpolation=interpolation_up) -# target = cv2.resize(tmp, (finesize,finesize),interpolation=interpolation_down) - -# return src,target def random_transform_single_mask(img,out_shape): out_h,out_w = out_shape @@ -138,40 +93,27 @@ def random_transform_single_mask(img,out_shape): return img def get_transform_params(): - scale_flag = np.random.random()<0.2 crop_flag = True rotat_flag = np.random.random()<0.2 color_flag = True flip_flag = np.random.random()<0.2 - blur_flag = np.random.random()<0.1 - flag_dict = {'scale':scale_flag,'crop':crop_flag,'rotat':rotat_flag,'color':color_flag, - 'flip':flip_flag,'blur':blur_flag} + degradate_flag = np.random.random()<0.5 + flag_dict = {'crop':crop_flag,'rotat':rotat_flag,'color':color_flag,'flip':flip_flag,'degradate':degradate_flag} - scale_rate = np.random.uniform(0.9,1.1) crop_rate = [np.random.random(),np.random.random()] rotat_rate = np.random.random() color_rate = [np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05), np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05)] flip_rate = np.random.random() - blur_rate = np.random.randint(1,15) - rate_dict = {'scale':scale_rate,'crop':crop_rate,'rotat':rotat_rate,'color':color_rate, - 'flip':flip_rate,'blur':blur_rate} + degradate_params = degradater.get_random_degenerate_params(mod='weaker_1') + rate_dict = {'crop':crop_rate,'rotat':rotat_rate,'color':color_rate,'flip':flip_rate,'degradate':degradate_params} return {'flag':flag_dict,'rate':rate_dict} def random_transform_single_image(img,finesize,params=None,test_flag = False): if params is None: params = get_transform_params() - if test_flag: - params['flag']['scale'] = False - if params['flag']['scale']: - h,w = img.shape[:2] - 
loadsize = min((h,w)) - a = (float(h)/float(w))*params['rate']['scale'] - if h B,C,T,H,W + self.ori_stream = self.ori_stream.reshape (1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + + #Init frist previous frame + self.previous_pred = self.ori_load_pool[self.opt.S*self.opt.N-1].copy() + # previous B,C,H,W + self.previous_pred = self.previous_pred.reshape(1,opt.finesize,opt.finesize,3).transpose((0,3,1,2)) + + def normalize(self,data): + ''' + normalize to -1 ~ 1 + ''' + return (data.astype(np.float32)/255.0-0.5)/0.5 + + def anti_normalize(self,data): + return np.clip((data*0.5+0.5)*255,0,255).astype(np.uint8) + + def next(self): + if self.t != 0: + self.previous_pred = None + self.ori_load_pool [:self.opt.S*self.opt.T-1] = self.ori_load_pool [1:self.opt.S*self.opt.T] + self.mosaic_load_pool[:self.opt.S*self.opt.T-1] = self.mosaic_load_pool[1:self.opt.S*self.opt.T] + #print(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg')) + _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) + _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) + _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) + _ori_img = data.random_transform_single_image(_ori_img,self.opt.finesize,self.transform_params) + _mosaic_img = data.random_transform_single_image(_mosaic_img,self.opt.finesize,self.transform_params) + + _ori_img,_mosaic_img = self.normalize(_ori_img),self.normalize(_mosaic_img) + self.ori_load_pool [self.opt.S*self.opt.T-1] = _ori_img + self.mosaic_load_pool[self.opt.S*self.opt.T-1] = _mosaic_img + + self.ori_stream = self.ori_load_pool [np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + self.mosaic_stream = self.mosaic_load_pool[np.linspace(0, (self.opt.T-1)*self.opt.S,self.opt.T,dtype=np.int64)].copy() + + # stream B,T,H,W,C -> B,C,T,H,W + self.ori_stream = self.ori_stream.reshape (1,self.opt.T,self.opt.finesize,self.opt.finesize,3).transpose((0,4,1,2,3)) + self.mosaic_stream = self.mosaic_stream.reshape(1,self.opt.T,self.opt.finesize,self.opt.finesize,3).transpose((0,4,1,2,3)) + + self.t += 1 + +class VideoDataLoader(object): + """VideoDataLoader""" + def __init__(self, opt, videolist, test_flag=False): + super(VideoDataLoader, self).__init__() + self.videolist = [] + self.opt = opt + self.test_flag = test_flag + for i in range(self.opt.n_epoch): + self.videolist += videolist + random.shuffle(self.videolist) + self.each_video_n_iter = self.opt.M -self.opt.S*(self.opt.T+1) + self.n_iter = len(self.videolist)//self.opt.load_thread//self.opt.batchsize*self.each_video_n_iter*self.opt.load_thread + self.queue = Queue(self.opt.load_thread) + self.ori_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W + self.mosaic_stream = np.zeros((self.opt.batchsize,3,self.opt.T,self.opt.finesize,self.opt.finesize),dtype=np.float32)# B,C,T,H,W + self.previous_pred = np.zeros((self.opt.batchsize,3,self.opt.finesize,self.opt.finesize),dtype=np.float32) + self.load_init() + + def load(self,videolist): + for load_video_iter in range(len(videolist)//self.opt.batchsize): + iter_videolist = 
videolist[load_video_iter*self.opt.batchsize:(load_video_iter+1)*self.opt.batchsize]
+            videoloaders = [VideoLoader(self.opt,os.path.join(self.opt.dataset,iter_videolist[i]),self.test_flag) for i in range(self.opt.batchsize)]
+            for each_video_iter in range(self.each_video_n_iter):
+                for i in range(self.opt.batchsize):
+                    self.ori_stream[i] = videoloaders[i].ori_stream
+                    self.mosaic_stream[i] = videoloaders[i].mosaic_stream
+                    if each_video_iter == 0:
+                        self.previous_pred[i] = videoloaders[i].previous_pred
+                    videoloaders[i].next()
+                if each_video_iter == 0:
+                    self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),self.previous_pred])
+                else:
+                    self.queue.put([self.ori_stream.copy(),self.mosaic_stream.copy(),None])
+
+    def load_init(self):
+        ptvn = len(self.videolist)//self.opt.load_thread # per_thread_video_num
+        for i in range(self.opt.load_thread):
+            p = Process(target=self.load,args=(self.videolist[i*ptvn:(i+1)*ptvn],))
+            p.daemon = True
+            p.start()
+
+    def get_data(self):
+        return self.queue.get()
\ No newline at end of file
diff --git a/util/degradater.py b/util/degradater.py
new file mode 100644
index 0000000..15b2e7f
--- /dev/null
+++ b/util/degradater.py
@@ -0,0 +1,119 @@
+'''
+https://github.com/sonack/GFRNet_pytorch_new
+'''
+import random
+import cv2
+import numpy as np
+
+def gaussian_blur(img, sigma=3, size=13):
+    if sigma > 0:
+        if isinstance(size, int):
+            size = (size, size)
+        img = cv2.GaussianBlur(img, size, sigma)
+    return img
+
+def down(img, scale, shape):
+    if scale > 1:
+        h, w, _ = shape
+        scaled_h, scaled_w = int(h / scale), int(w / scale)
+        img = cv2.resize(img, (scaled_w, scaled_h), interpolation = cv2.INTER_CUBIC)
+    return img
+
+def up(img, scale, shape):
+    if scale > 1:
+        h, w, _ = shape
+        img = cv2.resize(img, (w, h), interpolation = cv2.INTER_CUBIC)
+    return img
+
+def awgn(img, level):
+    if level > 0:
+        noise = np.random.randn(*img.shape) * level
+        img = (img + noise).clip(0,255).astype(np.uint8)
+    return img
+
+def jpeg_compressor(img,quality):
+    if quality > 0: # 0 indicates no lossy compression (i.e. lossless encoding)
+        encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
+        img = cv2.imdecode(cv2.imencode('.jpg', img, encode_param)[1], 1)
+    return img
+
+def get_random_degenerate_params(mod='strong'):
+    '''
+    mod : strong | only_downsample | only_4x | weaker_1 | weaker_2
+    '''
+    params = {}
+    gaussianBlur_size_list = list(range(3,14,2))
+
+    if mod == 'strong':
+        gaussianBlur_sigma_list = [1 + x for x in range(3)]
+        gaussianBlur_sigma_list += [0]
+        downsample_scale_list = [1 + x * 0.1 for x in range(0,71)]
+        awgn_level_list = list(range(1, 8, 1))
+        jpeg_quality_list = list(range(10, 41, 1))
+        jpeg_quality_list += int(len(jpeg_quality_list) * 0.33) * [0]
+
+    elif mod == 'only_downsample':
+        gaussianBlur_sigma_list = [0]
+        downsample_scale_list = [1 + x * 0.1 for x in range(0,71)]
+        awgn_level_list = [0]
+        jpeg_quality_list = [0]
+
+    elif mod == 'only_4x':
+        gaussianBlur_sigma_list = [0]
+        downsample_scale_list = [4]
+        awgn_level_list = [0]
+        jpeg_quality_list = [0]
+
+    elif mod == 'weaker_1': # 0.5 trigger prob
+        gaussianBlur_sigma_list = [1 + x for x in range(3)]
+        gaussianBlur_sigma_list += int(len(gaussianBlur_sigma_list)) * [0] # 1/2 chance to trigger this degradation
+
+        downsample_scale_list = [1 + x * 0.1 for x in range(0,71)]
+        downsample_scale_list += int(len(downsample_scale_list)) * [1]
+
+        awgn_level_list = list(range(1, 8, 1))
+        awgn_level_list += int(len(awgn_level_list)) * [0]
+
+        jpeg_quality_list = list(range(10, 41, 1))
+        jpeg_quality_list += int(len(jpeg_quality_list)) * [0]
+
+    elif mod == 'weaker_2': # weaker than weaker_1, jpeg [20,40]
+        gaussianBlur_sigma_list = [1 + x for x in range(3)]
+        gaussianBlur_sigma_list += int(len(gaussianBlur_sigma_list)) * [0] # 1/2 chance to trigger this degradation
+
+        downsample_scale_list = [1 + x * 0.1 for x in range(0,71)]
+        downsample_scale_list += int(len(downsample_scale_list)) * [1]
+
+        awgn_level_list = list(range(1, 8, 1))
+        awgn_level_list += int(len(awgn_level_list)) * [0]
+
+        jpeg_quality_list = list(range(20, 41, 1))
+        jpeg_quality_list += int(len(jpeg_quality_list)) * [0]
+
+    params['blur_sigma'] = random.choice(gaussianBlur_sigma_list)
+    params['blur_size'] = random.choice(gaussianBlur_size_list)
+    params['updown_scale'] = random.choice(downsample_scale_list)
+    params['awgn_level'] = random.choice(awgn_level_list)
+    params['jpeg_quality'] = random.choice(jpeg_quality_list)
+
+    return params
+
+def degradate(img,params,jpeg_last = False):
+    shape = img.shape
+    if not params:
+        params = get_random_degenerate_params('strong') # 'original' is not a defined mod and would crash; fall back to 'strong'
+
+    if jpeg_last:
+        img = gaussian_blur(img,params['blur_sigma'],params['blur_size'])
+        img = down(img,params['updown_scale'],shape)
+        img = awgn(img,params['awgn_level'])
+        img = up(img,params['updown_scale'],shape)
+        img = jpeg_compressor(img,params['jpeg_quality'])
+    else:
+        img = gaussian_blur(img,params['blur_sigma'],params['blur_size'])
+        img = down(img,params['updown_scale'],shape)
+        img = awgn(img,params['awgn_level'])
+        img = jpeg_compressor(img,params['jpeg_quality'])
+        img = up(img,params['updown_scale'],shape)
+
+    return img
\ No newline at end of file
diff --git a/util/image_processing.py b/util/image_processing.py
index 6b66812..c832475 100755
--- a/util/image_processing.py
+++ b/util/image_processing.py
@@ -8,15 +8,6 @@
 if 'Windows' in platform.platform():
     system_type = 'Windows'
 
-DCT_Q = np.array([[8,16,19,22,26,27,29,34],
-                  [16,16,22,24,27,29,34,37],
-                  [19,22,26,27,29,34,34,38],
-                  [22,22,26,27,29,34,37,40],
-                  [22,26,27,29,32,35,40,48],
-                  [26,27,29,32,35,40,48,58],
-                  [26,27,29,34,38,46,56,59],
-                  [27,29,35,38,46,56,69,83]])
-
 def imread(file_path,mod = 'normal',loadsize = 0, rgb=False):
     '''
     mod: 'normal' | 'gray' | 'all'
@@ -121,34 +112,6 @@ def makedataset(target_image,orgin_image):
     img[0:256,0:256] = target_image[0:256,int(w/2-256/2):int(w/2+256/2)]
     img[0:256,256:512] = orgin_image[0:256,int(w/2-256/2):int(w/2+256/2)]
     return img
-
-def block_dct_and_idct(g,QQF,QQF_16):
-    return cv2.idct(np.round(16.0*cv2.dct(g)/QQF)*QQF_16)
-
-def image_dct_and_idct(I,QF):
-    h,w = I.shape
-    QQF = DCT_Q*QF
-    QQF_16 = QQF/16.0
-    for i in range(h//8):
-        for j in range(w//8):
-            I[i*8:(i+1)*8,j*8:(j+1)*8] = cv2.idct(np.round(16.0*cv2.dct(I[i*8:(i+1)*8,j*8:(j+1)*8])/QQF)*QQF_16)
-            #I[i*8:(i+1)*8,j*8:(j+1)*8] = block_dct_and_idct(I[i*8:(i+1)*8,j*8:(j+1)*8],QQF,QQF_16)
-    return I
-
-def dctblur(img,Q):
-    '''
-    Q: 1~20, 1->best
-    '''
-    h,w = img.shape[:2]
-    img = img[:8*(h//8),:8*(w//8)]
-    img = img.astype(np.float32)
-    if img.ndim == 2:
-        img = image_dct_and_idct(img, Q)
-    if img.ndim == 3:
-        h,w,ch = img.shape
-        for i in range(ch):
-            img[:,:,i] = image_dct_and_idct(img[:,:,i], Q)
-    return (np.clip(img,0,255)).astype(np.uint8)
 
 def find_mostlikely_ROI(mask):
     contours,hierarchy=cv2.findContours(mask, cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
@@ -215,19 +178,6 @@ def mask_area(mask):
         area = 0
     return area
-
-def Q_lapulase(resImg):
-    '''
-    Evaluate image quality
-    score > 20 normal
-    score > 50 clear
-    '''
-    img2gray = cv2.cvtColor(resImg, cv2.COLOR_BGR2GRAY)
- img2gray = resize(img2gray,512) - res = cv2.Laplacian(img2gray, cv2.CV_64F) - score = res.var() - return score - def replace_mosaic(img_origin,img_fake,mask,x,y,size,no_feather): img_fake = cv2.resize(img_fake,(size*2,size*2),interpolation=cv2.INTER_LANCZOS4) if no_feather: @@ -257,6 +207,18 @@ def replace_mosaic(img_origin,img_fake,mask,x,y,size,no_feather): return img_result +def Q_lapulase(resImg): + ''' + Evaluate image quality + score > 20 normal + score > 50 clear + ''' + img2gray = cv2.cvtColor(resImg, cv2.COLOR_BGR2GRAY) + img2gray = resize(img2gray,512) + res = cv2.Laplacian(img2gray, cv2.CV_64F) + score = res.var() + return score + def psnr(img1,img2): mse = np.mean((img1/255.0-img2/255.0)**2) if mse < 1e-10: From 796b59d036057fe8b45a92635ce04b22da52a7c6 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Thu, 22 Apr 2021 22:15:33 +0800 Subject: [PATCH 6/9] sngan --- models/BVDNet.py | 13 ++-- models/model_util.py | 4 +- train/clean/train.py | 144 ++++++++++++++++++++++++++----------------- util/dataloader.py | 7 ++- 4 files changed, 101 insertions(+), 67 deletions(-) diff --git a/models/BVDNet.py b/models/BVDNet.py index ada2ee0..917007a 100644 --- a/models/BVDNet.py +++ b/models/BVDNet.py @@ -103,7 +103,7 @@ def define_G(N=2, n_blocks=1, gpu_id='-1'): return netG ################################Discriminator################################ -def define_D(input_nc, ndf, n_layers_D, use_sigmoid=False, num_D=1, gpu_id='-1'): +def define_D(input_nc=6, ndf=64, n_layers_D=3, use_sigmoid=False, num_D=4, gpu_id='-1'): netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid, num_D) if gpu_id != '-1' and len(gpu_id) == 1: netD.cuda() @@ -188,20 +188,19 @@ def __init__(self, mode='D'): def forward(self, dis_fake = None, dis_real = None): if isinstance(dis_fake, list): - weight = 2**len(dis_fake) if self.mode == 'D': loss = 0 for i in range(len(dis_fake)): - weight = weight/2 - loss += weight*self.lossf(dis_fake[i],dis_real[i]) + loss += self.lossf(dis_fake[i][0],dis_real[i][0]) elif self.mode =='G': loss = 0 + weight = 2**len(dis_fake) for i in range(len(dis_fake)): weight = weight/2 - loss += weight*self.lossf(dis_fake[i]) + loss += weight*self.lossf(dis_fake[i][0]) return loss else: if self.mode == 'D': - return self.lossf(dis_fake,dis_real) + return self.lossf(dis_fake[0],dis_real[0]) elif self.mode =='G': - return self.lossf(dis_fake) + return self.lossf(dis_fake[0]) diff --git a/models/model_util.py b/models/model_util.py index 6c9d89e..e2fef97 100644 --- a/models/model_util.py +++ b/models/model_util.py @@ -114,7 +114,7 @@ def __init__(self): super(HingeLossG, self).__init__() def forward(self, dis_fake): - loss_fake = -torch.mean(dis_fake) + loss_fake = F.relu(-torch.mean(dis_fake)) return loss_fake class VGGLoss(nn.Module): @@ -128,7 +128,7 @@ def __init__(self, gpu_id): self.vgg = nn.DataParallel(self.vgg) self.vgg.cuda() - self.criterion = nn.L1Loss() + self.criterion = nn.MSELoss() self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0] def forward(self, x, y): diff --git a/train/clean/train.py b/train/clean/train.py index 38be2c7..1cba002 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -12,10 +12,10 @@ import torch.nn as nn import time -from util import mosaic,util,ffmpeg,filt,data,dataloader +from util import util,data,dataloader from util import image_processing as impro from models import BVDNet,model_util -import torch.backends.cudnn as cudnn +from skimage.metrics import structural_similarity from tensorboardX import SummaryWriter ''' @@ -44,7 +44,26 @@ 
opt.parser.add_argument('--savename',type=str,default='face', help='') opt.parser.add_argument('--showresult_freq',type=int,default=1000, help='') opt.parser.add_argument('--showresult_num',type=int,default=4, help='') -opt.parser.add_argument('--psnr_freq',type=int,default=100, help='') + +def ImageQualityEvaluation(tensor1,tensor2,showiter,writer,tag): + batch_len = len(tensor1) + psnr,ssmi = 0,0 + for i in range(len(tensor1)): + img1,img2 = data.tensor2im(tensor1,rgb2bgr=False,batch_index=i), data.tensor2im(tensor2,rgb2bgr=False,batch_index=i) + psnr += impro.psnr(img1,img2) + ssmi += structural_similarity(img1,img2,multichannel=True) + writer.add_scalars('quality/psnr', {tag:psnr/batch_len}, showiter) + writer.add_scalars('quality/ssmi', {tag:ssmi/batch_len}, showiter) + return psnr/batch_len,ssmi/batch_len + +def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag): + show_imgs = [] + for i in range(max_num): + show_imgs += [ data.tensor2im(tensor1,rgb2bgr = False,batch_index=i), + data.tensor2im(tensor2,rgb2bgr = False,batch_index=i), + data.tensor2im(tensor3,rgb2bgr = False,batch_index=i)] + show_img = impro.splice(show_imgs, (opt.showresult_num,3)) + writer.add_image(tag, show_img,showiter,dataformats='HWC') ''' --------------------------Init-------------------------- @@ -66,10 +85,15 @@ if opt.gpu_id != '-1' and len(opt.gpu_id) == 1: torch.backends.cudnn.benchmark = True netG = BVDNet.define_G(opt.N,gpu_id=opt.gpu_id) +netD = BVDNet.define_D(gpu_id=opt.gpu_id) optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) -lossf_L2 = nn.MSELoss() -lossf_VGG = model_util.VGGLoss(opt.gpu_id) +optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) + +lossfun_L2 = nn.MSELoss() +lossfun_VGG = model_util.VGGLoss(opt.gpu_id) +lossfun_GAND = BVDNet.GANLoss('D') +lossfun_GANG = BVDNet.GANLoss('G') ''' --------------------------Init DataLoader-------------------------- @@ -101,35 +125,46 @@ previous_frame = data.to_tensor(previous_predframe_tmp, opt.gpu_id) else: previous_frame = data.to_tensor(previous_frame, opt.gpu_id) - optimizer_G.zero_grad() + + ############### Forward #################### + # Fake Generator out = netG(mosaic_stream,previous_frame) - loss_L2 = lossf_L2(out,ori_stream[:,:,opt.N]) * opt.lambda_L2 - loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG - TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) - loss = loss_L2+loss_VGG - loss.backward() + # Discriminator + dis_real = netD(torch.cat((mosaic_stream[:,:,opt.N],ori_stream[:,:,opt.N].detach()),dim=1)) + dis_fake_D = netD(torch.cat((mosaic_stream[:,:,opt.N],out.detach()),dim=1)) + loss_D = lossfun_GAND(dis_fake_D,dis_real) * opt.lambda_GAN + # Generator + dis_fake_G = netD(torch.cat((mosaic_stream[:,:,opt.N],out),dim=1)) + loss_L2 = lossfun_L2(out,ori_stream[:,:,opt.N]) * opt.lambda_L2 + loss_VGG = lossfun_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG + loss_GANG = lossfun_GANG(dis_fake_G) * opt.lambda_GAN + loss_G = loss_L2+loss_VGG+loss_GANG + + ############### Backward Pass #################### + optimizer_G.zero_grad() + loss_G.backward() optimizer_G.step() + + optimizer_D.zero_grad() + loss_D.backward() + optimizer_D.step() previous_predframe_tmp = out.detach().cpu().numpy() + TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item(), + 'loss_D':loss_D.item(),'loss_G':loss_G.item()}, train_iter) + # save network if train_iter%opt.save_freq == 0 and 
train_iter != 0: - model_util.save(netG, os.path.join('checkpoints',opt.savename,str(train_iter)+'.pth'), opt.gpu_id) - - # psnr - if train_iter%opt.psnr_freq ==0: - psnr = 0 - for i in range(len(out)): - psnr += impro.psnr(data.tensor2im(out,batch_index=i), data.tensor2im(ori_stream[:,:,opt.N],batch_index=i)) - TBGlobalWriter.add_scalars('psnr', {'train':psnr/len(out)}, train_iter) + model_util.save(netG, os.path.join('checkpoints',opt.savename,str(train_iter)+'_G.pth'), opt.gpu_id) + model_util.save(netD, os.path.join('checkpoints',opt.savename,str(train_iter)+'_D.pth'), opt.gpu_id) + # Image quality evaluation + if train_iter%(opt.showresult_freq//10) == 0: + ImageQualityEvaluation(out,ori_stream[:,:,opt.N],train_iter,TBGlobalWriter,'train') + + # Show result if train_iter % opt.showresult_freq == 0: - show_imgs = [] - for i in range(opt.showresult_num): - show_imgs += [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False,batch_index=i), - data.tensor2im(out,rgb2bgr = False,batch_index=i), - data.tensor2im(ori_stream[:,:,opt.N],rgb2bgr = False,batch_index=i)] - show_img = impro.splice(show_imgs, (opt.showresult_num,3)) - TBGlobalWriter.add_image('train', show_img,train_iter,dataformats='HWC') + ShowImage(mosaic_stream[:,:,opt.N],out,ori_stream[:,:,opt.N],train_iter,opt.showresult_num,TBGlobalWriter,'train') ''' --------------------------Eval-------------------------- @@ -144,29 +179,21 @@ previous_frame = data.to_tensor(previous_frame, opt.gpu_id) with torch.no_grad(): out = netG(mosaic_stream,previous_frame) - loss_L2 = lossf_L2(out,ori_stream[:,:,opt.N]) - loss_VGG = lossf_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG - TBGlobalWriter.add_scalars('loss/eval', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) + loss_L2 = lossfun_L2(out,ori_stream[:,:,opt.N]) * opt.lambda_L2 + loss_VGG = lossfun_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG + #TBGlobalWriter.add_scalars('loss/eval', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) previous_predframe_tmp = out.detach().cpu().numpy() - #psnr - if (train_iter)%opt.psnr_freq ==0: - psnr = 0 - for i in range(len(out)): - psnr += impro.psnr(data.tensor2im(out,batch_index=i), data.tensor2im(ori_stream[:,:,opt.N],batch_index=i)) - TBGlobalWriter.add_scalars('psnr', {'eval':psnr/len(out)}, train_iter) - #show + # Image quality evaluation + if train_iter%(opt.showresult_freq//10) == 0: + psnr,ssmi = ImageQualityEvaluation(out,ori_stream[:,:,opt.N],train_iter,TBGlobalWriter,'eval') + + # Show result if train_iter % opt.showresult_freq == 0: - show_imgs = [] - for i in range(opt.showresult_num): - show_imgs += [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False,batch_index=i), - data.tensor2im(out,rgb2bgr = False,batch_index=i), - data.tensor2im(ori_stream[:,:,opt.N],rgb2bgr = False,batch_index=i)] - show_img = impro.splice(show_imgs, (opt.showresult_num,3)) - TBGlobalWriter.add_image('eval', show_img,train_iter,dataformats='HWC') + ShowImage(mosaic_stream[:,:,opt.N],out,ori_stream[:,:,opt.N],train_iter,opt.showresult_num,TBGlobalWriter,'eval') t_end = time.time() - print('iter:{0:d} t:{1:.2f} L2:{2:.4f} vgg:{3:.4f} psnr:{4:.2f}'.format(train_iter,t_end-t_start, - loss_L2.item(),loss_VGG.item(),psnr/len(out)) ) + print('iter:{0:d} t:{1:.2f} L2:{2:.4f} vgg:{3:.4f} psnr:{4:.2f} ssmi:{5:.3f}'.format(train_iter,t_end-t_start, + loss_L2.item(),loss_VGG.item(),psnr,ssmi) ) t_strat = time.time() ''' @@ -179,18 +206,21 @@ for video in videos: frames = os.listdir(os.path.join(opt.dataset_test,video,'image')) sorted(frames) - 
mosaic_stream = [] - for i in range(opt.T): - _mosaic = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[i*opt.S]),loadsize=opt.finesize,rgb=True) - mosaic_stream.append(_mosaic) - previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) - mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 - mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) - mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) - previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id,use_transform = False, is0_1 = False) - with torch.no_grad(): - out = netG(mosaic_stream,previous) + for step in range(5): + mosaic_stream = [] + for i in range(opt.T): + _mosaic = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[i*opt.S+step]),loadsize=opt.finesize,rgb=True) + mosaic_stream.append(_mosaic) + if step == 0: + previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) + previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id,use_transform = False, is0_1 = False) + mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 + mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) + mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) + with torch.no_grad(): + out = netG(mosaic_stream,previous) + previous = out show_imgs+= [data.tensor2im(mosaic_stream[:,:,opt.N],rgb2bgr = False),data.tensor2im(out,rgb2bgr = False)] show_img = impro.splice(show_imgs, (len(videos),2)) - TBGlobalWriter.add_image('test', show_img,train_iter,dataformats='HWC') + TBGlobalWriter.add_image('test', show_img,train_iter,dataformats='HWC') \ No newline at end of file diff --git a/util/dataloader.py b/util/dataloader.py index 3c8a401..3c9effe 100644 --- a/util/dataloader.py +++ b/util/dataloader.py @@ -31,7 +31,12 @@ def __init__(self, opt, video_dir, test_flag=False): #Init load pool for i in range(self.opt.S*self.opt.T): - #print(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg')) + # random + if np.random.random()<0.05: + self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] + if np.random.random()<0.02: + self.transform_params['rate']['crop'] = [np.random.random(),np.random.random()] + _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) From 1749be92f3f360e328d7b2945d56b65d3a7f943a Mon Sep 17 00:00:00 2001 From: hypox64 Date: Sat, 24 Apr 2021 14:24:20 +0800 Subject: [PATCH 7/9] Gan code finished! 
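
Finish the GAN training path for BVDNet:

* merge models/components.py (the torchvision-style ResNet) into
  models/model_util.py and import it from there in BiSeNet/loadmodel;
* add model_util.todevice() as the single place that moves a net to
  CPU, one GPU, or nn.DataParallel;
* read the final prediction map of each discriminator scale via
  dis[i][-1], since MultiscaleDiscriminator returns a list of
  per-scale outputs.

A minimal sketch of how the GANLoss pair is meant to be called (names
as in models/BVDNet.py and train/clean/train.py; cond stands for the
center mosaic frame, real/fake for the matching clean and restored
frames):

    lossfun_GAND = BVDNet.GANLoss('D')  # hinge loss, discriminator side
    lossfun_GANG = BVDNet.GANLoss('G')  # hinge loss, generator side
    dis_real = netD(torch.cat((cond, real), dim=1))           # one entry per scale
    dis_fake = netD(torch.cat((cond, fake.detach()), dim=1))  # detached: D step only
    loss_D = lossfun_GAND(dis_fake, dis_real)
    loss_G = lossfun_GANG(netD(torch.cat((cond, fake), dim=1)))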
--- cores/core.py | 4 +- models/BVDNet.py | 24 ++--- models/BiSeNet_model.py | 6 +- models/components.py | 234 ---------------------------------------- models/loadmodel.py | 30 ++---- models/model_util.py | 215 +++++++++++++++++++++++++++++++++++- models/runmodel.py | 12 +-- train/clean/train.py | 55 ++++++---- util/data.py | 37 +++---- util/dataloader.py | 24 +++-- util/degradater.py | 2 +- 11 files changed, 301 insertions(+), 342 deletions(-) delete mode 100644 models/components.py diff --git a/cores/core.py b/cores/core.py index 9fb858a..fab7da2 100644 --- a/cores/core.py +++ b/cores/core.py @@ -283,9 +283,9 @@ def cleanmosaic_video_fusion(opt,netG,netM): mosaic_input[:,:,k*3:(k+1)*3] = impro.resize(img_pool[k][y-size:y+size,x-size:x+size], INPUT_SIZE) mask_input = impro.resize(mask,np.min(img_origin.shape[:2]))[y-size:y+size,x-size:x+size] mosaic_input[:,:,-1] = impro.resize(mask_input, INPUT_SIZE) - mosaic_input_tensor = data.im2tensor(mosaic_input,bgr2rgb=False,gpu_id=opt.gpu_id,use_transform = False,is0_1 = False) + mosaic_input_tensor = data.im2tensor(mosaic_input,bgr2rgb=False,gpu_id=opt.gpu_id) unmosaic_pred = netG(mosaic_input_tensor) - img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False ,is0_1 = False) + img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False) img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather) except Exception as e: print('Warning:',e) diff --git a/models/BVDNet.py b/models/BVDNet.py index 917007a..f9ca814 100644 --- a/models/BVDNet.py +++ b/models/BVDNet.py @@ -94,22 +94,14 @@ def forward(self, stream, previous): def define_G(N=2, n_blocks=1, gpu_id='-1'): netG = BVDNet(N = N, n_blocks=n_blocks) - if gpu_id != '-1' and len(gpu_id) == 1: - netG.cuda() - elif gpu_id != '-1' and len(gpu_id) > 1: - netG = nn.DataParallel(netG) - netG.cuda() - # netG.apply(model_util.init_weights) + netG = model_util.todevice(netG,gpu_id) + netG.apply(model_util.init_weights) return netG ################################Discriminator################################ -def define_D(input_nc=6, ndf=64, n_layers_D=3, use_sigmoid=False, num_D=4, gpu_id='-1'): +def define_D(input_nc=6, ndf=64, n_layers_D=1, use_sigmoid=False, num_D=3, gpu_id='-1'): netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, use_sigmoid, num_D) - if gpu_id != '-1' and len(gpu_id) == 1: - netD.cuda() - elif gpu_id != '-1' and len(gpu_id) > 1: - netD = nn.DataParallel(netD) - netD.cuda() + netD = model_util.todevice(netD,gpu_id) netD.apply(model_util.init_weights) return netD @@ -191,16 +183,16 @@ def forward(self, dis_fake = None, dis_real = None): if self.mode == 'D': loss = 0 for i in range(len(dis_fake)): - loss += self.lossf(dis_fake[i][0],dis_real[i][0]) + loss += self.lossf(dis_fake[i][-1],dis_real[i][-1]) elif self.mode =='G': loss = 0 weight = 2**len(dis_fake) for i in range(len(dis_fake)): weight = weight/2 - loss += weight*self.lossf(dis_fake[i][0]) + loss += weight*self.lossf(dis_fake[i][-1]) return loss else: if self.mode == 'D': - return self.lossf(dis_fake[0],dis_real[0]) + return self.lossf(dis_fake[-1],dis_real[-1]) elif self.mode =='G': - return self.lossf(dis_fake[0]) + return self.lossf(dis_fake[-1]) diff --git a/models/BiSeNet_model.py b/models/BiSeNet_model.py index b58ea5b..3675141 100644 --- a/models/BiSeNet_model.py +++ b/models/BiSeNet_model.py @@ -2,7 +2,7 @@ import torch.nn as nn import torch import torch.nn.functional as F -from . import components +from . 
import model_util import warnings warnings.filterwarnings(action='ignore') @@ -43,7 +43,7 @@ def forward(self, output, target): class resnet18(torch.nn.Module): def __init__(self, pretrained=True): super().__init__() - self.features = components.resnet18(pretrained=pretrained) + self.features = model_util.resnet18(pretrained=pretrained) self.conv1 = self.features.conv1 self.bn1 = self.features.bn1 self.relu = self.features.relu @@ -70,7 +70,7 @@ def forward(self, input): class resnet101(torch.nn.Module): def __init__(self, pretrained=True): super().__init__() - self.features = components.resnet101(pretrained=pretrained) + self.features = model_util.resnet101(pretrained=pretrained) self.conv1 = self.features.conv1 self.bn1 = self.features.bn1 self.relu = self.features.relu diff --git a/models/components.py b/models/components.py deleted file mode 100644 index 59cb333..0000000 --- a/models/components.py +++ /dev/null @@ -1,234 +0,0 @@ -import torch.nn as nn -import torch.utils.model_zoo as model_zoo - - -__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', - 'resnet152'] - - -model_urls = { - 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', - 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', - 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', - 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', - 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', -} - - -def conv3x3(in_planes, out_planes, stride=1): - """3x3 convolution with padding""" - return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, - padding=1, bias=False) - - -def conv1x1(in_planes, out_planes, stride=1): - """1x1 convolution""" - return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) - - -class BasicBlock(nn.Module): - expansion = 1 - - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): - super(BasicBlock, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv3x3(inplanes, planes, stride) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - self.conv2 = conv3x3(planes, planes) - self.bn2 = norm_layer(planes) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class Bottleneck(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): - super(Bottleneck, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 - self.conv1 = conv1x1(inplanes, planes) - self.bn1 = norm_layer(planes) - self.conv2 = conv3x3(planes, planes, stride) - self.bn2 = norm_layer(planes) - self.conv3 = conv1x1(planes, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - self.relu = nn.ReLU(inplace=True) - self.downsample = downsample - self.stride = stride - - def forward(self, x): - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - - out = self.conv3(out) 
- out = self.bn3(out) - - if self.downsample is not None: - identity = self.downsample(x) - - out += identity - out = self.relu(out) - - return out - - -class ResNet(nn.Module): - - def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None): - super(ResNet, self).__init__() - if norm_layer is None: - norm_layer = nn.BatchNorm2d - self.inplanes = 64 - self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, - bias=False) - self.bn1 = norm_layer(64) - self.relu = nn.ReLU(inplace=True) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) - self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) - self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) - self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) - self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) - self.fc = nn.Linear(512 * block.expansion, num_classes) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. - # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 - if zero_init_residual: - for m in self.modules(): - if isinstance(m, Bottleneck): - nn.init.constant_(m.bn3.weight, 0) - elif isinstance(m, BasicBlock): - nn.init.constant_(m.bn2.weight, 0) - - def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None): - if norm_layer is None: - norm_layer = nn.BatchNorm2d - downsample = None - if stride != 1 or self.inplanes != planes * block.expansion: - downsample = nn.Sequential( - conv1x1(self.inplanes, planes * block.expansion, stride), - norm_layer(planes * block.expansion), - ) - - layers = [] - layers.append(block(self.inplanes, planes, stride, downsample, norm_layer)) - self.inplanes = planes * block.expansion - for _ in range(1, blocks): - layers.append(block(self.inplanes, planes, norm_layer=norm_layer)) - - return nn.Sequential(*layers) - - def forward(self, x): - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - x = self.maxpool(x) - - x = self.layer1(x) - x = self.layer2(x) - x = self.layer3(x) - x = self.layer4(x) - - x = self.avgpool(x) - x = x.view(x.size(0), -1) - x = self.fc(x) - - return x - - -def resnet18(pretrained=False, **kwargs): - """Constructs a ResNet-18 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) - return model - - -def resnet34(pretrained=False, **kwargs): - """Constructs a ResNet-34 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['resnet34'])) - return model - - -def resnet50(pretrained=False, **kwargs): - """Constructs a ResNet-50 model. 
- - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['resnet50'])) - return model - - -def resnet101(pretrained=False, **kwargs): - """Constructs a ResNet-101 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) - return model - - -def resnet152(pretrained=False, **kwargs): - """Constructs a ResNet-152 model. - - Args: - pretrained (bool): If True, returns a model pre-trained on ImageNet - """ - model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs) - if pretrained: - model.load_state_dict(model_zoo.load_url(model_urls['resnet152'])) - return model \ No newline at end of file diff --git a/models/loadmodel.py b/models/loadmodel.py index 75ca731..8e5ed1c 100755 --- a/models/loadmodel.py +++ b/models/loadmodel.py @@ -1,7 +1,7 @@ import torch +from . import model_util from .pix2pix_model import define_G from .pix2pixHD_model import define_G as define_G_HD -from .unet_model import UNet from .video_model import MosaicNet from .videoHD_model import MosaicNet as MosaicNet_HD from .BiSeNet_model import BiSeNet @@ -11,19 +11,6 @@ def show_paramsnumber(net,netname='net'): parameters = round(parameters/1e6,2) print(netname+' parameters: '+str(parameters)+'M') -def __patch_instance_norm_state_dict(state_dict, module, keys, i=0): - """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" - key = keys[i] - if i + 1 == len(keys): # at the end, pointing to a parameter/buffer - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'running_mean' or key == 'running_var'): - if getattr(module, key) is None: - state_dict.pop('.'.join(keys)) - if module.__class__.__name__.startswith('InstanceNorm') and \ - (key == 'num_batches_tracked'): - state_dict.pop('.'.join(keys)) - else: - __patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) def pix2pix(opt): # print(opt.model_path,opt.netG) @@ -33,9 +20,8 @@ def pix2pix(opt): netG = define_G(3, 3, 64, opt.netG, norm='batch',use_dropout=True, init_type='normal', gpu_ids=[]) show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) + netG = model_util.todevice(netG,opt.gpu_id) netG.eval() - if opt.gpu_id != -1: - netG.cuda() return netG @@ -57,11 +43,11 @@ def style(opt): # patch InstanceNorm checkpoints prior to 0.4 for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop - __patch_instance_norm_state_dict(state_dict, netG, key.split('.')) + model_util.patch_instance_norm_state_dict(state_dict, netG, key.split('.')) netG.load_state_dict(state_dict) - if opt.gpu_id != -1: - netG.cuda() + netG = model_util.todevice(netG,opt.gpu_id) + netG.eval() return netG def video(opt): @@ -71,9 +57,8 @@ def video(opt): netG = MosaicNet(3*25+1, 3,norm = 'batch') show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) + netG = model_util.todevice(netG,opt.gpu_id) netG.eval() - if opt.gpu_id != -1: - netG.cuda() return netG def bisenet(opt,type='roi'): @@ -86,7 +71,6 @@ def bisenet(opt,type='roi'): net.load_state_dict(torch.load(opt.model_path)) elif type == 'mosaic': net.load_state_dict(torch.load(opt.mosaic_position_model_path)) + net = model_util.todevice(net,opt.gpu_id) net.eval() - if opt.gpu_id != -1: - net.cuda() return net 
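
All of the loaders above now defer device placement to model_util.todevice,
which the next diff introduces. A minimal sketch of the save/reload round trip
these helpers support (the checkpoint path and N are illustrative only;
define_G and save signatures as introduced in this patch series):

    import torch
    from models import BVDNet, model_util

    netG = BVDNet.define_G(N=2, gpu_id='0')   # '-1' = CPU, '0' = one GPU, '0,1' = DataParallel
    model_util.save(netG, 'checkpoints/face/10000_G.pth', gpu_id='0')

    netG = BVDNet.define_G(N=2, gpu_id='-1')  # rebuild on CPU for inference
    netG.load_state_dict(torch.load('checkpoints/face/10000_G.pth', map_location='cpu'))
    netG.eval()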
diff --git a/models/model_util.py b/models/model_util.py index e2fef97..2aa7f9e 100644 --- a/models/model_util.py +++ b/models/model_util.py @@ -8,7 +8,9 @@ import torch.nn.functional as F import torch.nn.utils.spectral_norm as SpectralNorm from torchvision import models +import torch.utils.model_zoo as model_zoo +################################## IO ################################## def save(net,path,gpu_id): if isinstance(net, nn.DataParallel): torch.save(net.module.cpu().state_dict(),path) @@ -17,6 +19,29 @@ def save(net,path,gpu_id): if gpu_id != '-1': net.cuda() +def todevice(net,gpu_id): + if gpu_id != '-1' and len(gpu_id) == 1: + net.cuda() + elif gpu_id != '-1' and len(gpu_id) > 1: + net = nn.DataParallel(net) + net.cuda() + return net + +# patch InstanceNorm checkpoints prior to 0.4 +def patch_instance_norm_state_dict(state_dict, module, keys, i=0): + """Fix InstanceNorm checkpoints incompatibility (prior to 0.4)""" + key = keys[i] + if i + 1 == len(keys): # at the end, pointing to a parameter/buffer + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'running_mean' or key == 'running_var'): + if getattr(module, key) is None: + state_dict.pop('.'.join(keys)) + if module.__class__.__name__.startswith('InstanceNorm') and \ + (key == 'num_batches_tracked'): + state_dict.pop('.'.join(keys)) + else: + patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1) + ################################## initialization ################################## def get_norm_layer(norm_type='instance',mod = '2d'): if norm_type == 'batch': @@ -60,6 +85,7 @@ def init_func(m): net.apply(init_func) ################################## Network structure ################################## +################################## ResnetBlock ################################## class ResnetBlockSpectralNorm(nn.Module): def __init__(self, dim, padding_type, activation=nn.LeakyReLU(0.2), use_dropout=False): super(ResnetBlockSpectralNorm, self).__init__() @@ -99,6 +125,193 @@ def forward(self, x): out = x + self.conv_block(x) return out +################################## Resnet ################################## +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = 
self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class Bottleneck(nn.Module): + expansion = 4 + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = norm_layer(planes) + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = norm_layer(planes) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self.inplanes = 64 + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0], norm_layer=norm_layer) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, norm_layer=norm_layer) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, norm_layer=norm_layer) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, norm_layer=norm_layer) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, norm_layer=None): + if norm_layer is None: + norm_layer = nn.BatchNorm2d + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + +def resnet18(pretrained=False, **kwargs): + """Constructs a ResNet-18 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet18'])) + return model + +def resnet101(pretrained=False, **kwargs): + """Constructs a ResNet-101 model. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + """ + model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs) + if pretrained: + model.load_state_dict(model_zoo.load_url(model_urls['resnet101'])) + return model + ################################## Loss function ################################## class HingeLossD(nn.Module): def __init__(self): @@ -114,7 +327,7 @@ def __init__(self): super(HingeLossG, self).__init__() def forward(self, dis_fake): - loss_fake = F.relu(-torch.mean(dis_fake)) + loss_fake = -torch.mean(dis_fake) return loss_fake class VGGLoss(nn.Module): diff --git a/models/runmodel.py b/models/runmodel.py index bba1fe4..3e97fee 100755 --- a/models/runmodel.py +++ b/models/runmodel.py @@ -7,11 +7,11 @@ import torch import numpy as np -def run_segment(img,net,size = 360,gpu_id = 0): +def run_segment(img,net,size = 360,gpu_id = '-1'): img = impro.resize(img,size) - img = data.im2tensor(img,gpu_id = gpu_id, bgr2rgb = False,use_transform = False , is0_1 = True) + img = data.im2tensor(img,gpu_id = gpu_id, bgr2rgb = False, is0_1 = True) mask = net(img) - mask = data.tensor2im(mask, gray=True,rgb2bgr = False, is0_1 = True) + mask = data.tensor2im(mask, gray=True, is0_1 = True) return mask def run_pix2pix(img,net,opt): @@ -50,12 +50,12 @@ def run_styletransfer(opt, net, img): else: canny_low = opt.canny-int(opt.canny/2) canny_high = opt.canny+int(opt.canny/2) - img = cv2.Canny(img,opt.canny-50,opt.canny+50) + img = cv2.Canny(img,canny_low,canny_high) if opt.only_edges: return img - img = data.im2tensor(img,gpu_id=opt.gpu_id,gray=True,use_transform = False,is0_1 = False) + img = data.im2tensor(img,gpu_id=opt.gpu_id,gray=True) else: - img = data.im2tensor(img,gpu_id=opt.gpu_id,gray=False,use_transform = True) + img = data.im2tensor(img,gpu_id=opt.gpu_id) img = net(img) img = data.tensor2im(img) return img diff --git a/train/clean/train.py b/train/clean/train.py index 1cba002..fbcd2b3 100644 --- a/train/clean/train.py 
+++ b/train/clean/train.py @@ -31,15 +31,19 @@ opt.parser.add_argument('--finesize',type=int,default=256, help='') opt.parser.add_argument('--loadsize',type=int,default=286, help='') opt.parser.add_argument('--batchsize',type=int,default=1, help='') +opt.parser.add_argument('--no_gan', action='store_true', help='if specified, do not use gan') +opt.parser.add_argument('--n_layers_D',type=int,default=1, help='') +opt.parser.add_argument('--num_D',type=int,default=3, help='') opt.parser.add_argument('--lambda_L2',type=float,default=100, help='') opt.parser.add_argument('--lambda_VGG',type=float,default=1, help='') opt.parser.add_argument('--lambda_GAN',type=float,default=1, help='') +opt.parser.add_argument('--lambda_D',type=float,default=1, help='') opt.parser.add_argument('--load_thread',type=int,default=4, help='number of thread for loading data') opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='') opt.parser.add_argument('--dataset_test',type=str,default='./datasets/face_test/', help='') opt.parser.add_argument('--n_epoch',type=int,default=200, help='') -opt.parser.add_argument('--save_freq',type=int,default=100000, help='') +opt.parser.add_argument('--save_freq',type=int,default=10000, help='') opt.parser.add_argument('--continue_train', action='store_true', help='') opt.parser.add_argument('--savename',type=str,default='face', help='') opt.parser.add_argument('--showresult_freq',type=int,default=1000, help='') @@ -84,16 +88,16 @@ def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag): ''' if opt.gpu_id != '-1' and len(opt.gpu_id) == 1: torch.backends.cudnn.benchmark = True -netG = BVDNet.define_G(opt.N,gpu_id=opt.gpu_id) -netD = BVDNet.define_D(gpu_id=opt.gpu_id) +netG = BVDNet.define_G(opt.N,gpu_id=opt.gpu_id) optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) -optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) - lossfun_L2 = nn.MSELoss() lossfun_VGG = model_util.VGGLoss(opt.gpu_id) -lossfun_GAND = BVDNet.GANLoss('D') -lossfun_GANG = BVDNet.GANLoss('G') +if not opt.no_gan: + netD = BVDNet.define_D(n_layers_D=opt.n_layers_D,num_D=opt.num_D,gpu_id=opt.gpu_id) + optimizer_D = torch.optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) + lossfun_GAND = BVDNet.GANLoss('D') + lossfun_GANG = BVDNet.GANLoss('G') ''' --------------------------Init DataLoader-------------------------- @@ -130,33 +134,42 @@ def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag): # Fake Generator out = netG(mosaic_stream,previous_frame) # Discriminator - dis_real = netD(torch.cat((mosaic_stream[:,:,opt.N],ori_stream[:,:,opt.N].detach()),dim=1)) - dis_fake_D = netD(torch.cat((mosaic_stream[:,:,opt.N],out.detach()),dim=1)) - loss_D = lossfun_GAND(dis_fake_D,dis_real) * opt.lambda_GAN + if not opt.no_gan: + dis_real = netD(torch.cat((mosaic_stream[:,:,opt.N],ori_stream[:,:,opt.N].detach()),dim=1)) + dis_fake_D = netD(torch.cat((mosaic_stream[:,:,opt.N],out.detach()),dim=1)) + loss_D = lossfun_GAND(dis_fake_D,dis_real) * opt.lambda_GAN * opt.lambda_D # Generator - dis_fake_G = netD(torch.cat((mosaic_stream[:,:,opt.N],out),dim=1)) loss_L2 = lossfun_L2(out,ori_stream[:,:,opt.N]) * opt.lambda_L2 loss_VGG = lossfun_VGG(out,ori_stream[:,:,opt.N]) * opt.lambda_VGG - loss_GANG = lossfun_GANG(dis_fake_G) * opt.lambda_GAN - loss_G = loss_L2+loss_VGG+loss_GANG + loss_G = loss_L2+loss_VGG + if not opt.no_gan: + dis_fake_G = netD(torch.cat((mosaic_stream[:,:,opt.N],out),dim=1)) + 
loss_GANG = lossfun_GANG(dis_fake_G) * opt.lambda_GAN + loss_G = loss_G + loss_GANG ############### Backward Pass #################### optimizer_G.zero_grad() loss_G.backward() optimizer_G.step() - optimizer_D.zero_grad() - loss_D.backward() - optimizer_D.step() - previous_predframe_tmp = out.detach().cpu().numpy() + if not opt.no_gan: + optimizer_D.zero_grad() + loss_D.backward() + optimizer_D.step() - TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item(), - 'loss_D':loss_D.item(),'loss_G':loss_G.item()}, train_iter) + previous_predframe_tmp = out.detach().cpu().numpy() + + if not opt.no_gan: + TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item(), + 'loss_D':loss_D.item(),'loss_G':loss_G.item()}, train_iter) + else: + TBGlobalWriter.add_scalars('loss/train', {'L2':loss_L2.item(),'VGG':loss_VGG.item()}, train_iter) # save network if train_iter%opt.save_freq == 0 and train_iter != 0: model_util.save(netG, os.path.join('checkpoints',opt.savename,str(train_iter)+'_G.pth'), opt.gpu_id) - model_util.save(netD, os.path.join('checkpoints',opt.savename,str(train_iter)+'_D.pth'), opt.gpu_id) + if not opt.no_gan: + model_util.save(netD, os.path.join('checkpoints',opt.savename,str(train_iter)+'_D.pth'), opt.gpu_id) # Image quality evaluation if train_iter%(opt.showresult_freq//10) == 0: @@ -213,7 +226,7 @@ def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag): mosaic_stream.append(_mosaic) if step == 0: previous = impro.imread(os.path.join(opt.dataset_test,video,'image',frames[opt.N*opt.S-1]),loadsize=opt.finesize,rgb=True) - previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id,use_transform = False, is0_1 = False) + previous = data.im2tensor(previous,bgr2rgb = False, gpu_id = opt.gpu_id, is0_1 = False) mosaic_stream = (np.array(mosaic_stream).astype(np.float32)/255.0-0.5)/0.5 mosaic_stream = mosaic_stream.reshape(1,opt.T,opt.finesize,opt.finesize,3).transpose((0,4,1,2,3)) mosaic_stream = data.to_tensor(mosaic_stream, opt.gpu_id) diff --git a/util/data.py b/util/data.py index 888d5c7..7d472a3 100755 --- a/util/data.py +++ b/util/data.py @@ -6,11 +6,6 @@ import cv2 from . import image_processing as impro from . 
import degradater -transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean = (0.5, 0.5, 0.5), std = (0.5, 0.5, 0.5)) - ] -) def to_tensor(data,gpu_id): data = torch.from_numpy(data) @@ -18,8 +13,7 @@ def to_tensor(data,gpu_id): data = data.cuda() return data - -def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = False, batch_index=0): +def tensor2im(image_tensor, gray=False, rgb2bgr = True ,is0_1 = False, batch_index=0): image_tensor =image_tensor.data image_numpy = image_tensor[batch_index].cpu().float().numpy() @@ -31,7 +25,7 @@ def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = if gray: h, w = image_numpy.shape[1:] image_numpy = image_numpy.reshape(h,w) - return image_numpy.astype(imtype) + return image_numpy.astype(np.uint8) # output 3ch if image_numpy.shape[0] == 1: @@ -39,11 +33,10 @@ def tensor2im(image_tensor, imtype=np.uint8, gray=False, rgb2bgr = True ,is0_1 = image_numpy = image_numpy.transpose((1, 2, 0)) if rgb2bgr and not gray: image_numpy = image_numpy[...,::-1]-np.zeros_like(image_numpy) - return image_numpy.astype(imtype) + return image_numpy.astype(np.uint8) -def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = True, gpu_id = 0, use_transform = True,is0_1 = True): - +def im2tensor(image_numpy, gray=False,bgr2rgb = True, reshape = True, gpu_id = '-1',is0_1 = False): if gray: h, w = image_numpy.shape image_numpy = (image_numpy/255.0-0.5)/0.5 @@ -54,15 +47,12 @@ def im2tensor(image_numpy, imtype=np.uint8, gray=False,bgr2rgb = True, reshape = h, w ,ch = image_numpy.shape if bgr2rgb: image_numpy = image_numpy[...,::-1]-np.zeros_like(image_numpy) - if use_transform: - image_tensor = transform(image_numpy) + if is0_1: + image_numpy = image_numpy/255.0 else: - if is0_1: - image_numpy = image_numpy/255.0 - else: - image_numpy = (image_numpy/255.0-0.5)/0.5 - image_numpy = image_numpy.transpose((2, 0, 1)) - image_tensor = torch.from_numpy(image_numpy).float() + image_numpy = (image_numpy/255.0-0.5)/0.5 + image_numpy = image_numpy.transpose((2, 0, 1)) + image_tensor = torch.from_numpy(image_numpy).float() if reshape: image_tensor = image_tensor.reshape(1,ch,h,w) if gpu_id != '-1': @@ -75,7 +65,6 @@ def shuffledata(data,target): np.random.set_state(state) np.random.shuffle(target) - def random_transform_single_mask(img,out_shape): out_h,out_w = out_shape img = cv2.resize(img,(int(out_w*random.uniform(1.1, 1.5)),int(out_h*random.uniform(1.1, 1.5)))) @@ -105,7 +94,7 @@ def get_transform_params(): color_rate = [np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05), np.random.uniform(-0.05,0.05),np.random.uniform(-0.05,0.05)] flip_rate = np.random.random() - degradate_params = degradater.get_random_degenerate_params(mod='weaker_1') + degradate_params = degradater.get_random_degenerate_params(mod='weaker_2') rate_dict = {'crop':crop_rate,'rotat':rotat_rate,'color':color_rate,'flip':flip_rate,'degradate':degradate_params} return {'flag':flag_dict,'rate':rate_dict} @@ -113,6 +102,9 @@ def get_transform_params(): def random_transform_single_image(img,finesize,params=None,test_flag = False): if params is None: params = get_transform_params() + + if params['flag']['degradate']: + img = degradater.degradate(img,params['rate']['degradate']) if params['flag']['crop']: h,w = img.shape[:2] @@ -135,9 +127,6 @@ def random_transform_single_image(img,finesize,params=None,test_flag = False): if params['flag']['flip']: img = img[:,::-1,:] - if 
params['flag']['degradate']: - img = degradater.degradate(img,params['rate']['degradate']) - #check shape if img.shape[0]!= finesize or img.shape[1]!= finesize: img = cv2.resize(img,(finesize,finesize)) diff --git a/util/dataloader.py b/util/dataloader.py index 3c9effe..7d2206c 100644 --- a/util/dataloader.py +++ b/util/dataloader.py @@ -28,17 +28,11 @@ def __init__(self, opt, video_dir, test_flag=False): feg_mask = impro.imread(os.path.join(video_dir,'mask','00001.png'),mod='gray',loadsize=self.opt.loadsize) self.mosaic_size,self.mod,self.rect_rat,self.feather = mosaic.get_random_parameter(feg_ori,feg_mask) self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] - + self.loadsize = self.opt.loadsize #Init load pool for i in range(self.opt.S*self.opt.T): - # random - if np.random.random()<0.05: - self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] - if np.random.random()<0.02: - self.transform_params['rate']['crop'] = [np.random.random(),np.random.random()] - - _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) - _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.opt.loadsize) + _ori_img = impro.imread(os.path.join(video_dir,'origin_image','%05d' % (i+1)+'.jpg'),loadsize=self.loadsize,rgb=True) + _mask = impro.imread(os.path.join(video_dir,'mask','%05d' % (i+1)+'.png' ),mod='gray',loadsize=self.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) _ori_img = data.random_transform_single_image(_ori_img,opt.finesize,self.transform_params) _mosaic_img = data.random_transform_single_image(_mosaic_img,opt.finesize,self.transform_params) @@ -70,13 +64,21 @@ def anti_normalize(self,data): return np.clip((data*0.5+0.5)*255,0,255).astype(np.uint8) def next(self): + # random + if np.random.random()<0.05: + self.startpos = [random.randint(0,self.mosaic_size),random.randint(0,self.mosaic_size)] + if np.random.random()<0.02: + self.transform_params['rate']['crop'] = [np.random.random(),np.random.random()] + if np.random.random()<0.02: + self.loadsize = np.random.randint(self.opt.finesize,self.opt.loadsize) + if self.t != 0: self.previous_pred = None self.ori_load_pool [:self.opt.S*self.opt.T-1] = self.ori_load_pool [1:self.opt.S*self.opt.T] self.mosaic_load_pool[:self.opt.S*self.opt.T-1] = self.mosaic_load_pool[1:self.opt.S*self.opt.T] #print(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg')) - _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.opt.loadsize,rgb=True) - _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.opt.loadsize) + _ori_img = impro.imread(os.path.join(self.video_dir,'origin_image','%05d' % (self.opt.S*self.opt.T+self.t)+'.jpg'),loadsize=self.loadsize,rgb=True) + _mask = impro.imread(os.path.join(self.video_dir,'mask','%05d' % (self.opt.S*self.opt.T+self.t)+'.png' ),mod='gray',loadsize=self.loadsize) _mosaic_img = mosaic.addmosaic_base(_ori_img, _mask, self.mosaic_size,0, self.mod,self.rect_rat,self.feather,self.startpos) _ori_img = data.random_transform_single_image(_ori_img,self.opt.finesize,self.transform_params) _mosaic_img = data.random_transform_single_image(_mosaic_img,self.opt.finesize,self.transform_params) diff --git 
a/util/degradater.py b/util/degradater.py index 15b2e7f..9e4d227 100644 --- a/util/degradater.py +++ b/util/degradater.py @@ -98,7 +98,7 @@ def get_random_degenerate_params(mod='strong'): return params -def degradate(img,params,jpeg_last = False): +def degradate(img,params,jpeg_last = True): shape = img.shape if not params: params = get_random_degenerate_params('original') From 65f48925ba0fc5cdcfb83172f2ef818b82083927 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Sun, 25 Apr 2021 19:11:56 +0800 Subject: [PATCH 8/9] Completed the video core and faster feather --- cores/core.py | 60 ++++++----- cores/options.py | 94 +++++++++-------- deepmosaic.py | 5 +- models/loadmodel.py | 23 ++--- models/videoHD_model.py | 173 ------------------------------- models/video_model.py | 216 --------------------------------------- train/clean/train.py | 3 +- util/data.py | 9 ++ util/image_processing.py | 44 ++++---- 9 files changed, 132 insertions(+), 495 deletions(-) delete mode 100644 models/videoHD_model.py delete mode 100644 models/video_model.py diff --git a/cores/core.py b/cores/core.py index fab7da2..e990a21 100644 --- a/cores/core.py +++ b/cores/core.py @@ -244,11 +244,15 @@ def cleanmosaic_video_byframe(opt,netG,netM): def cleanmosaic_video_fusion(opt,netG,netM): path = opt.media_path - N = 25 - if 'HD' in os.path.basename(opt.model_path): - INPUT_SIZE = 256 - else: - INPUT_SIZE = 128 + N,T,S = 2,5,3 + LEFT_FRAME = (N*S) + POOL_NUM = LEFT_FRAME*2+1 + INPUT_SIZE = 256 + FRAME_POS = np.linspace(0, (T-1)*S,T,dtype=np.int64) + img_pool = [] + previous_frame = None + init_flag = True + fps,imagepaths,height,width = video_init(opt,path) positions = get_mosaic_positions(opt,netM,imagepaths,savemask=True) t1 = time.time() @@ -258,37 +262,41 @@ def cleanmosaic_video_fusion(opt,netG,netM): # clean mosaic print('Clean Mosaic:') length = len(imagepaths) - - img_pool = [] - mosaic_input = np.zeros((INPUT_SIZE,INPUT_SIZE,3*N+1), dtype='uint8') - + for i,imagepath in enumerate(imagepaths,0): x,y,size = positions[i][0],positions[i][1],positions[i][2] - + input_stream = [] # image read stream - mask = cv2.imread(os.path.join(opt.temp_dir+'/mosaic_mask',imagepath),0) - if i==0 : - for j in range(0,N): - img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+j-12,0,len(imagepaths)-1)]))) - else: + if i==0 :# init + for j in range(POOL_NUM): + img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+j-LEFT_FRAME,0,len(imagepaths)-1)]))) + else: # load next frame img_pool.pop(0) - img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+12,0,len(imagepaths)-1)]))) - img_origin = img_pool[12] + img_pool.append(impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepaths[np.clip(i+LEFT_FRAME,0,len(imagepaths)-1)]))) + img_origin = img_pool[LEFT_FRAME] img_result = img_origin.copy() if size>100: try:#Avoid unknown errors - #reshape to network input shape - for k in range(N): - mosaic_input[:,:,k*3:(k+1)*3] = impro.resize(img_pool[k][y-size:y+size,x-size:x+size], INPUT_SIZE) - mask_input = impro.resize(mask,np.min(img_origin.shape[:2]))[y-size:y+size,x-size:x+size] - mosaic_input[:,:,-1] = impro.resize(mask_input, INPUT_SIZE) - mosaic_input_tensor = data.im2tensor(mosaic_input,bgr2rgb=False,gpu_id=opt.gpu_id) - unmosaic_pred = netG(mosaic_input_tensor) - img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = False) + for pos in FRAME_POS: + input_stream.append(impro.resize(img_pool[pos][y-size:y+size,x-size:x+size], 
INPUT_SIZE)[:,:,::-1]) + if init_flag: + init_flag = False + previous_frame = input_stream[N] + previous_frame = data.im2tensor(previous_frame,bgr2rgb=True,gpu_id=opt.gpu_id) + + input_stream = np.array(input_stream).reshape(1,T,INPUT_SIZE,INPUT_SIZE,3).transpose((0,4,1,2,3)) + input_stream = data.to_tensor(data.normalize(input_stream),gpu_id=opt.gpu_id) + unmosaic_pred = netG(input_stream,previous_frame) + img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = True) + previous_frame = unmosaic_pred + mask = cv2.imread(os.path.join(opt.temp_dir+'/mosaic_mask',imagepath),0) img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather) except Exception as e: - print('Warning:',e) + init_flag = True + print('Error:',e) + else: + init_flag = True cv2.imwrite(os.path.join(opt.temp_dir+'/replace_mosaic',imagepath),img_result) os.remove(os.path.join(opt.temp_dir+'/video2image',imagepath)) diff --git a/cores/options.py b/cores/options.py index 58759f8..f1d612b 100644 --- a/cores/options.py +++ b/cores/options.py @@ -11,6 +11,7 @@ def __init__(self): def initialize(self): #base + self.parser.add_argument('--debug', action='store_true', help='if specified, start debug mode') self.parser.add_argument('--gpu_id', type=str,default='0', help='if -1, use cpu') self.parser.add_argument('--media_path', type=str, default='./imgs/ruoruo.jpg',help='your videos or images path') self.parser.add_argument('-ss', '--start_time', type=str, default='00:00:00',help='start position of video, default is the beginning of video') @@ -58,8 +59,7 @@ def getparse(self, test_flag = False): model_name = os.path.basename(self.opt.model_path) self.opt.temp_dir = os.path.join(self.opt.temp_dir, 'DeepMosaics_temp') - - + if self.opt.gpu_id != '-1': os.environ["CUDA_VISIBLE_DEVICES"] = str(self.opt.gpu_id) import torch @@ -70,53 +70,59 @@ def getparse(self, test_flag = False): if test_flag: if not os.path.exists(self.opt.media_path): - print('Error: Bad media path!') - input('Please press any key to exit.\n') - sys.exit(0) - - if self.opt.mode == 'auto': - if 'clean' in model_name or self.opt.traditional: - self.opt.mode = 'clean' - elif 'add' in model_name: - self.opt.mode = 'add' - elif 'style' in model_name or 'edges' in model_name: - self.opt.mode = 'style' - else: - print('Please input running model!') + print('Error: Media does not exist!') input('Please press any key to exit.\n') sys.exit(0) - - if self.opt.output_size == 0 and self.opt.mode == 'style': - self.opt.output_size = 512 - - if 'edges' in model_name or 'edges' in self.opt.preprocess: - self.opt.edges = True - - if self.opt.netG == 'auto' and self.opt.mode =='clean': - if 'unet_128' in model_name: - self.opt.netG = 'unet_128' - elif 'resnet_9blocks' in model_name: - self.opt.netG = 'resnet_9blocks' - elif 'HD' in model_name and 'video' not in model_name: - self.opt.netG = 'HD' - elif 'video' in model_name: - self.opt.netG = 'video' - else: - print('Type of Generator error!') + if not os.path.exists(self.opt.model_path): + print('Error: Model does not exist!') input('Please press any key to exit.\n') sys.exit(0) - if self.opt.ex_mult == 'auto': - if 'face' in model_name: - self.opt.ex_mult = 1.1 + if self.opt.mode == 'auto': + if 'clean' in model_name or self.opt.traditional: + self.opt.mode = 'clean' + elif 'add' in model_name: + self.opt.mode = 'add' + elif 'style' in model_name or 'edges' in model_name: + self.opt.mode = 'style' + else: + print('Please check model_path!') + input('Please press any key to exit.\n') + sys.exit(0) + + if 
self.opt.output_size == 0 and self.opt.mode == 'style': + self.opt.output_size = 512 + + if 'edges' in model_name or 'edges' in self.opt.preprocess: + self.opt.edges = True + + if self.opt.netG == 'auto' and self.opt.mode =='clean': + if 'unet_128' in model_name: + self.opt.netG = 'unet_128' + elif 'resnet_9blocks' in model_name: + self.opt.netG = 'resnet_9blocks' + elif 'video' in model_name: + self.opt.netG = 'video' + else: + print('Type of Generator error!') + input('Please press any key to exit.\n') + sys.exit(0) + + if self.opt.ex_mult == 'auto': + if 'face' in model_name: + self.opt.ex_mult = 1.1 + else: + self.opt.ex_mult = 1.5 else: - self.opt.ex_mult = 1.5 - else: - self.opt.ex_mult = float(self.opt.ex_mult) - - if self.opt.mosaic_position_model_path == 'auto': - _path = os.path.join(os.path.split(self.opt.model_path)[0],'mosaic_position.pth') - self.opt.mosaic_position_model_path = _path - # print(self.opt.mosaic_position_model_path) + self.opt.ex_mult = float(self.opt.ex_mult) + + if self.opt.mosaic_position_model_path == 'auto': + _path = os.path.join(os.path.split(self.opt.model_path)[0],'mosaic_position.pth') + if os.path.isfile(_path): + self.opt.mosaic_position_model_path = _path + else: + print('Please check mosaic_position_model_path!') + input('Please press any key to exit.\n') + sys.exit(0) return self.opt \ No newline at end of file diff --git a/deepmosaic.py index 2571d38..1d5c4e6 100644 --- a/deepmosaic.py +++ b/deepmosaic.py @@ -68,8 +68,11 @@ def main(): print('This type of file is not supported') util.clean_tempfiles(opt, tmp_init = False) - + if __name__ == '__main__': + if opt.debug: + main() + sys.exit(0) try: main() print('Finished!') diff --git a/models/loadmodel.py index 8e5ed1c..16124c4 100755 --- a/models/loadmodel.py +++ b/models/loadmodel.py @@ -1,23 +1,23 @@ import torch from .
import model_util -from .pix2pix_model import define_G -from .pix2pixHD_model import define_G as define_G_HD -from .video_model import MosaicNet -from .videoHD_model import MosaicNet as MosaicNet_HD +from .pix2pix_model import define_G as pix2pix_G +from .pix2pixHD_model import define_G as pix2pixHD_G +# from .video_model import MosaicNet +# from .videoHD_model import MosaicNet as MosaicNet_HD from .BiSeNet_model import BiSeNet +from .BVDNet import define_G as video_G def show_paramsnumber(net,netname='net'): parameters = sum(param.numel() for param in net.parameters()) parameters = round(parameters/1e6,2) print(netname+' parameters: '+str(parameters)+'M') - def pix2pix(opt): # print(opt.model_path,opt.netG) if opt.netG == 'HD': - netG = define_G_HD(3, 3, 64, 'global' ,4) + netG = pix2pixHD_G(3, 3, 64, 'global' ,4) else: - netG = define_G(3, 3, 64, opt.netG, norm='batch',use_dropout=True, init_type='normal', gpu_ids=[]) + netG = pix2pix_G(3, 3, 64, opt.netG, norm='batch',use_dropout=True, init_type='normal', gpu_ids=[]) show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) netG = model_util.todevice(netG,opt.gpu_id) @@ -27,9 +27,9 @@ def pix2pix(opt): def style(opt): if opt.edges: - netG = define_G(1, 3, 64, 'resnet_9blocks', norm='instance',use_dropout=True, init_type='normal', gpu_ids=[]) + netG = pix2pix_G(1, 3, 64, 'resnet_9blocks', norm='instance',use_dropout=True, init_type='normal', gpu_ids=[]) else: - netG = define_G(3, 3, 64, 'resnet_9blocks', norm='instance',use_dropout=False, init_type='normal', gpu_ids=[]) + netG = pix2pix_G(3, 3, 64, 'resnet_9blocks', norm='instance',use_dropout=False, init_type='normal', gpu_ids=[]) #in other to load old pretrain model #https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/models/base_model.py @@ -51,10 +51,7 @@ def style(opt): return netG def video(opt): - if 'HD' in opt.model_path: - netG = MosaicNet_HD(3*25+1, 3, norm='instance') - else: - netG = MosaicNet(3*25+1, 3,norm = 'batch') + netG = video_G(N=2,n_blocks=1,gpu_id=opt.gpu_id) show_paramsnumber(netG,'netG') netG.load_state_dict(torch.load(opt.model_path)) netG = model_util.todevice(netG,opt.gpu_id) diff --git a/models/videoHD_model.py b/models/videoHD_model.py deleted file mode 100644 index 20e901f..0000000 --- a/models/videoHD_model.py +++ /dev/null @@ -1,173 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from .pix2pixHD_model import * - - -class encoder_2d(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect'): - assert(n_blocks >= 0) - super(encoder_2d, self).__init__() - activation = nn.ReLU(True) - - model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation] - ### downsample - for i in range(n_downsampling): - mult = 2**i - model += [nn.ReflectionPad2d(1),nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=0), - norm_layer(ngf * mult * 2), activation] - - self.model = nn.Sequential(*model) - def forward(self, input): - return self.model(input) - -class decoder_2d(nn.Module): - def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - padding_type='reflect'): - assert(n_blocks >= 0) - super(decoder_2d, self).__init__() - activation = nn.ReLU(True) - - model = [] - - ### resnet blocks - mult = 2**n_downsampling - for i in range(n_blocks): - model += [ResnetBlock(ngf * mult, padding_type=padding_type, 
activation=activation, norm_layer=norm_layer)] - - ### upsample - for i in range(n_downsampling): - mult = 2**(n_downsampling - i) - - # model += [ nn.Upsample(scale_factor = 2, mode='nearest'), - # nn.ReflectionPad2d(1), - # nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), - # norm_layer(int(ngf * mult / 2)), - # nn.ReLU(True)] - model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1), - norm_layer(int(ngf * mult / 2)), activation] - model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()] - self.model = nn.Sequential(*model) - - def forward(self, input): - return self.model(input) - - -class conv_3d(nn.Module): - def __init__(self,inchannel,outchannel,kernel_size=3,stride=2,padding=1,norm_layer_3d=nn.BatchNorm3d,use_bias=True): - super(conv_3d, self).__init__() - self.conv = nn.Sequential( - nn.Conv3d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=padding, bias=use_bias), - norm_layer_3d(outchannel), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - x = self.conv(x) - return x - -class conv_2d(nn.Module): - def __init__(self,inchannel,outchannel,kernel_size=3,stride=1,padding=1,norm_layer_2d=nn.BatchNorm2d,use_bias=True): - super(conv_2d, self).__init__() - self.conv = nn.Sequential( - nn.ReflectionPad2d(padding), - nn.Conv2d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=0, bias=use_bias), - norm_layer_2d(outchannel), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - x = self.conv(x) - return x - - -class encoder_3d(nn.Module): - def __init__(self,in_channel,norm_layer_2d,norm_layer_3d,use_bias): - super(encoder_3d, self).__init__() - self.inconv = conv_3d(1, 64, 7, 2, 3,norm_layer_3d,use_bias) - self.down1 = conv_3d(64, 128, 3, 2, 1,norm_layer_3d,use_bias) - self.down2 = conv_3d(128, 256, 3, 2, 1,norm_layer_3d,use_bias) - self.down3 = conv_3d(256, 512, 3, 2, 1,norm_layer_3d,use_bias) - self.down4 = conv_3d(512, 1024, 3, 1, 1,norm_layer_3d,use_bias) - self.pool = nn.AvgPool3d((5,1,1)) - # self.conver2d = nn.Sequential( - # nn.Conv2d(256*int(in_channel/4), 256, kernel_size=3, stride=1, padding=1, bias=use_bias), - # norm_layer_2d(256), - # nn.ReLU(inplace=True), - # ) - - - def forward(self, x): - - x = x.view(x.size(0),1,x.size(1),x.size(2),x.size(3)) - x = self.inconv(x) - x = self.down1(x) - x = self.down2(x) - x = self.down3(x) - x = self.down4(x) - #print(x.size()) - x = self.pool(x) - #print(x.size()) - # torch.Size([1, 1024, 16, 16]) - # torch.Size([1, 512, 5, 16, 16]) - - - x = x.view(x.size(0),x.size(1),x.size(3),x.size(4)) - - # x = self.conver2d(x) - - return x - - # def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, - # padding_type='reflect') - -class ALL(nn.Module): - def __init__(self, in_channel, out_channel,norm_layer_2d,norm_layer_3d,use_bias): - super(ALL, self).__init__() - - self.encoder_2d = encoder_2d(4,3,64,4,norm_layer=norm_layer_2d,padding_type='reflect') - self.encoder_3d = encoder_3d(in_channel,norm_layer_2d,norm_layer_3d,use_bias) - self.decoder_2d = decoder_2d(4,3,64,4,norm_layer=norm_layer_2d,padding_type='reflect') - # self.shortcut_cov = conv_2d(3,64,7,1,3,norm_layer_2d,use_bias) - self.merge1 = conv_2d(2048,1024,3,1,1,norm_layer_2d,use_bias) - # self.merge2 = nn.Sequential( - # conv_2d(128,64,3,1,1,norm_layer_2d,use_bias), - # nn.ReflectionPad2d(3), - # nn.Conv2d(64, out_channel, kernel_size=7, padding=0), - # 
nn.Tanh() - # ) - - def forward(self, x): - - N = int((x.size()[1])/3) - x_2d = torch.cat((x[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:], x[:,N-1:N,:,:]), 1) - #shortcut_2d = x[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:] - - x_2d = self.encoder_2d(x_2d) - x_3d = self.encoder_3d(x) - #x = x_2d + x_3d - x = torch.cat((x_2d,x_3d),1) - x = self.merge1(x) - - x = self.decoder_2d(x) - #shortcut_2d = self.shortcut_cov(shortcut_2d) - #x = torch.cat((x,shortcut_2d),1) - #x = self.merge2(x) - - return x - -def MosaicNet(in_channel, out_channel, norm='batch'): - - if norm == 'batch': - # norm_layer_2d = nn.BatchNorm2d - # norm_layer_3d = nn.BatchNorm3d - norm_layer_2d = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) - norm_layer_3d = functools.partial(nn.BatchNorm3d, affine=True, track_running_stats=True) - use_bias = False - elif norm == 'instance': - norm_layer_2d = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) - norm_layer_3d = functools.partial(nn.InstanceNorm3d, affine=False, track_running_stats=False) - use_bias = True - - return ALL(in_channel, out_channel, norm_layer_2d, norm_layer_3d, use_bias) diff --git a/models/video_model.py b/models/video_model.py deleted file mode 100644 index 4a095c6..0000000 --- a/models/video_model.py +++ /dev/null @@ -1,216 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -from .pix2pix_model import * - - -class encoder_2d(nn.Module): - """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. - - We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) - """ - - def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): - """Construct a Resnet-based generator - - Parameters: - input_nc (int) -- the number of channels in input images - output_nc (int) -- the number of channels in output images - ngf (int) -- the number of filters in the last conv layer - norm_layer -- normalization layer - use_dropout (bool) -- if use dropout layers - n_blocks (int) -- the number of ResNet blocks - padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero - """ - assert(n_blocks >= 0) - super(encoder_2d, self).__init__() - if type(norm_layer) == functools.partial: - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - - model = [nn.ReflectionPad2d(3), - nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias), - norm_layer(ngf), - nn.ReLU(True)] - - n_downsampling = 2 - for i in range(n_downsampling): # add downsampling layers - mult = 2 ** i - model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias), - norm_layer(ngf * mult * 2), - nn.ReLU(True)] - #torch.Size([1, 256, 32, 32]) - - self.model = nn.Sequential(*model) - - def forward(self, input): - """Standard forward""" - return self.model(input) - - -class decoder_2d(nn.Module): - """Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations. 
- - We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style) - """ - - def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'): - """Construct a Resnet-based generator - - Parameters: - input_nc (int) -- the number of channels in input images - output_nc (int) -- the number of channels in output images - ngf (int) -- the number of filters in the last conv layer - norm_layer -- normalization layer - use_dropout (bool) -- if use dropout layers - n_blocks (int) -- the number of ResNet blocks - padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero - """ - super(decoder_2d, self).__init__() - if type(norm_layer) == functools.partial: - use_bias = norm_layer.func == nn.InstanceNorm2d - else: - use_bias = norm_layer == nn.InstanceNorm2d - - model = [] - - n_downsampling = 2 - mult = 2 ** n_downsampling - for i in range(n_blocks): # add ResNet blocks - model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)] - #torch.Size([1, 256, 32, 32]) - - for i in range(n_downsampling): # add upsampling layers - mult = 2 ** (n_downsampling - i) - # model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), - # kernel_size=3, stride=2, - # padding=1, output_padding=1, - # bias=use_bias), - # norm_layer(int(ngf * mult / 2)), - # nn.ReLU(True)] - #https://distill.pub/2016/deconv-checkerboard/ - #https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/issues/190 - - model += [ nn.Upsample(scale_factor = 2, mode='nearest'), - nn.ReflectionPad2d(1), - nn.Conv2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=1, padding=0), - norm_layer(int(ngf * mult / 2)), - nn.ReLU(True)] - # model += [nn.ReflectionPad2d(3)] - # model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)] - # model += [nn.Tanh()] - # model += [nn.Sigmoid()] - - self.model = nn.Sequential(*model) - - def forward(self, input): - """Standard forward""" - return self.model(input) - - - -class conv_3d(nn.Module): - def __init__(self,inchannel,outchannel,kernel_size=3,stride=2,padding=1,norm_layer_3d=nn.BatchNorm3d,use_bias=True): - super(conv_3d, self).__init__() - self.conv = nn.Sequential( - nn.Conv3d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=padding, bias=use_bias), - norm_layer_3d(outchannel), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - x = self.conv(x) - return x - -class conv_2d(nn.Module): - def __init__(self,inchannel,outchannel,kernel_size=3,stride=1,padding=1,norm_layer_2d=nn.BatchNorm2d,use_bias=True): - super(conv_2d, self).__init__() - self.conv = nn.Sequential( - nn.ReflectionPad2d(padding), - nn.Conv2d(inchannel, outchannel, kernel_size=kernel_size, stride=stride, padding=0, bias=use_bias), - norm_layer_2d(outchannel), - nn.ReLU(inplace=True), - ) - - def forward(self, x): - x = self.conv(x) - return x - - -class encoder_3d(nn.Module): - def __init__(self,in_channel,norm_layer_2d,norm_layer_3d,use_bias): - super(encoder_3d, self).__init__() - self.down1 = conv_3d(1, 64, 3, 2, 1,norm_layer_3d,use_bias) - self.down2 = conv_3d(64, 128, 3, 2, 1,norm_layer_3d,use_bias) - self.down3 = conv_3d(128, 256, 3, 1, 1,norm_layer_3d,use_bias) - self.conver2d = nn.Sequential( - nn.Conv2d(256*int(in_channel/4), 256, kernel_size=3, stride=1, padding=1, bias=use_bias), - norm_layer_2d(256), - nn.ReLU(inplace=True), - ) - - - def forward(self, x): - - x 
= x.view(x.size(0),1,x.size(1),x.size(2),x.size(3)) - x = self.down1(x) - x = self.down2(x) - x = self.down3(x) - - x = x.view(x.size(0),x.size(1)*x.size(2),x.size(3),x.size(4)) - - x = self.conver2d(x) - - return x - - - -class ALL(nn.Module): - def __init__(self, in_channel, out_channel,norm_layer_2d,norm_layer_3d,use_bias): - super(ALL, self).__init__() - - self.encoder_2d = encoder_2d(4,-1,64,norm_layer=norm_layer_2d,n_blocks=9) - self.encoder_3d = encoder_3d(in_channel,norm_layer_2d,norm_layer_3d,use_bias) - self.decoder_2d = decoder_2d(4,3,64,norm_layer=norm_layer_2d,n_blocks=9) - self.shortcut_cov = conv_2d(3,64,7,1,3,norm_layer_2d,use_bias) - self.merge1 = conv_2d(512,256,3,1,1,norm_layer_2d,use_bias) - self.merge2 = nn.Sequential( - conv_2d(128,64,3,1,1,norm_layer_2d,use_bias), - nn.ReflectionPad2d(3), - nn.Conv2d(64, out_channel, kernel_size=7, padding=0), - nn.Tanh() - ) - - def forward(self, x): - - N = int((x.size()[1])/3) - x_2d = torch.cat((x[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:], x[:,N-1:N,:,:]), 1) - shortcut_2d = x[:,int((N-1)/2)*3:(int((N-1)/2)+1)*3,:,:] - - x_2d = self.encoder_2d(x_2d) - - x_3d = self.encoder_3d(x) - x = torch.cat((x_2d,x_3d),1) - x = self.merge1(x) - x = self.decoder_2d(x) - shortcut_2d = self.shortcut_cov(shortcut_2d) - x = torch.cat((x,shortcut_2d),1) - x = self.merge2(x) - - return x - -def MosaicNet(in_channel, out_channel, norm='batch'): - - if norm == 'batch': - # norm_layer_2d = nn.BatchNorm2d - # norm_layer_3d = nn.BatchNorm3d - norm_layer_2d = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True) - norm_layer_3d = functools.partial(nn.BatchNorm3d, affine=True, track_running_stats=True) - use_bias = False - elif norm == 'instance': - norm_layer_2d = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False) - norm_layer_3d = functools.partial(nn.InstanceNorm3d, affine=False, track_running_stats=False) - use_bias = True - - return ALL(in_channel, out_channel, norm_layer_2d, norm_layer_3d, use_bias) diff --git a/train/clean/train.py b/train/clean/train.py index fbcd2b3..cf8371c 100644 --- a/train/clean/train.py +++ b/train/clean/train.py @@ -32,6 +32,7 @@ opt.parser.add_argument('--loadsize',type=int,default=286, help='') opt.parser.add_argument('--batchsize',type=int,default=1, help='') opt.parser.add_argument('--no_gan', action='store_true', help='if specified, do not use gan') +opt.parser.add_argument('--n_blocks',type=int,default=4, help='') opt.parser.add_argument('--n_layers_D',type=int,default=1, help='') opt.parser.add_argument('--num_D',type=int,default=3, help='') opt.parser.add_argument('--lambda_L2',type=float,default=100, help='') @@ -89,7 +90,7 @@ def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag): if opt.gpu_id != '-1' and len(opt.gpu_id) == 1: torch.backends.cudnn.benchmark = True -netG = BVDNet.define_G(opt.N,gpu_id=opt.gpu_id) +netG = BVDNet.define_G(opt.N,opt.n_blocks,gpu_id=opt.gpu_id) optimizer_G = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2)) lossfun_L2 = nn.MSELoss() lossfun_VGG = model_util.VGGLoss(opt.gpu_id) diff --git a/util/data.py b/util/data.py index 7d472a3..60628a5 100755 --- a/util/data.py +++ b/util/data.py @@ -13,6 +13,15 @@ def to_tensor(data,gpu_id): data = data.cuda() return data +def normalize(data): + ''' + normalize to -1 ~ 1 + ''' + return (data.astype(np.float32)/255.0-0.5)/0.5 + +def anti_normalize(data): + return np.clip((data*0.5+0.5)*255,0,255).astype(np.uint8) + def tensor2im(image_tensor, gray=False, rgb2bgr = 
True ,is0_1 = False, batch_index=0): image_tensor =image_tensor.data image_numpy = image_tensor[batch_index].cpu().float().numpy() diff --git a/util/image_processing.py b/util/image_processing.py index c832475..6f1dd91 100755 --- a/util/image_processing.py +++ b/util/image_processing.py @@ -104,6 +104,12 @@ def color_adjust(img,alpha=0,beta=0,b=0,g=0,r=0,ran = False): return (np.clip(img,0,255)).astype('uint8') +def CAdaIN(src,dst): + ''' + make src has dst's style + ''' + return np.std(dst)*((src-np.mean(src))/np.std(src))+np.mean(dst) + def makedataset(target_image,orgin_image): target_image = resize(target_image,256) orgin_image = resize(orgin_image,256) @@ -177,35 +183,31 @@ def mask_area(mask): except: area = 0 return area - +import time def replace_mosaic(img_origin,img_fake,mask,x,y,size,no_feather): - img_fake = cv2.resize(img_fake,(size*2,size*2),interpolation=cv2.INTER_LANCZOS4) + img_fake = cv2.resize(img_fake,(size*2,size*2),interpolation=cv2.INTER_CUBIC) if no_feather: img_origin[y-size:y+size,x-size:x+size]=img_fake - img_result = img_origin + return img_origin else: - #color correction - RGB_origin = img_origin[y-size:y+size,x-size:x+size].mean(0).mean(0) - RGB_fake = img_fake.mean(0).mean(0) - for i in range(3):img_fake[:,:,i] = np.clip(img_fake[:,:,i]+RGB_origin[i]-RGB_fake[i],0,255) + # #color correction + # RGB_origin = img_origin[y-size:y+size,x-size:x+size].mean(0).mean(0) + # RGB_fake = img_fake.mean(0).mean(0) + # for i in range(3):img_fake[:,:,i] = np.clip(img_fake[:,:,i]+RGB_origin[i]-RGB_fake[i],0,255) #eclosion - eclosion_num = int(size/5) - entad = int(eclosion_num/2+2) + eclosion_num = int(size/10)+2 - mask = cv2.resize(mask,(img_origin.shape[1],img_origin.shape[0])) - mask = ch_one2three(mask) - - mask = (cv2.blur(mask, (eclosion_num, eclosion_num))) - mask_tmp = np.zeros_like(mask) - mask_tmp[y-size:y+size,x-size:x+size] = mask[y-size:y+size,x-size:x+size]# Fix edge overflow - mask = mask_tmp/255.0 + mask_crop = cv2.resize(mask,(img_origin.shape[1],img_origin.shape[0]))[y-size:y+size,x-size:x+size] + mask_crop = ch_one2three(mask_crop) + + mask_crop = (cv2.blur(mask_crop, (eclosion_num, eclosion_num))) + mask_crop = mask_crop/255.0 - img_tmp = np.zeros(img_origin.shape) - img_tmp[y-size:y+size,x-size:x+size]=img_fake - img_result = img_origin.copy() - img_result = (img_origin*(1-mask)+img_tmp*mask).astype('uint8') + img_crop = img_origin[y-size:y+size,x-size:x+size] + img_origin[y-size:y+size,x-size:x+size] = np.clip((img_crop*(1-mask_crop)+img_fake*mask_crop),0,255).astype('uint8') + + return img_origin - return img_result def Q_lapulase(resImg): ''' From e100118e1db2386109421b06a3503914e6764114 Mon Sep 17 00:00:00 2001 From: hypox64 Date: Mon, 10 May 2021 12:55:17 +0800 Subject: [PATCH 9/9] Updata README.md and fix some bugs --- .gitignore | 3 +- README.md | 30 +++++---- README_CN.md | 33 ++++++---- cores/core.py | 39 +++++++++--- cores/options.py | 4 +- docs/exe_help.md | 40 +++++++++--- docs/exe_help_CN.md | 45 ++++++++++---- docs/options_introduction.md | 2 +- docs/options_introduction_CN.md | 2 +- docs/pre-trained_models_introduction.md | 4 +- docs/pre-trained_models_introduction_CN.md | 4 +- docs/training_with_your_own_dataset.md | 28 +++++---- imgs/icon.jpg | Bin 36399 -> 0 bytes imgs/logo.ico | Bin 0 -> 16958 bytes imgs/logo.png | Bin 0 -> 22588 bytes imgs/logo_withwords.png | Bin 0 -> 50936 bytes make_datasets/csv/stars_name.csv | 1 - make_datasets/make_pix2pix_dataset.py | 20 +++--- make_datasets/make_video_dataset.py | 69 ++++++++++++++++----- 
models/loadmodel.py | 2 +- requirements.txt | 7 +++ server.py | 60 ++++++++++++++++++ train/add/train.py | 4 +- train/clean/train.py | 7 ++- util/data.py | 60 +++--------------- util/dataloader.py | 2 +- 26 files changed, 302 insertions(+), 164 deletions(-) delete mode 100644 imgs/icon.jpg create mode 100644 imgs/logo.ico create mode 100644 imgs/logo.png create mode 100644 imgs/logo_withwords.png delete mode 100644 make_datasets/csv/stars_name.csv create mode 100644 requirements.txt create mode 100644 server.py diff --git a/.gitignore b/.gitignore index 5ebe676..a02b768 100644 --- a/.gitignore +++ b/.gitignore @@ -184,4 +184,5 @@ nohup.out *.MP4 *.JPEG *.exe -*.npy \ No newline at end of file +*.npy +*.psd \ No newline at end of file diff --git a/README.md b/README.md index 21dd96f..9ebb067 100755 --- a/README.md +++ b/README.md @@ -1,12 +1,14 @@ -![image](./imgs/hand.gif) -# DeepMosaics -You can use it to automatically remove the mosaics in images and videos, or add mosaics to them.
-This project is based on "semantic segmentation" and "Image-to-Image Translation".
- -* [中文版README](./README_CN.md)
+
+

+ +
-### More Examples +# DeepMosaics +**English | [中文](./README_CN.md)**
+You can use it to automatically remove the mosaics in images and videos, or add mosaics to them.
This project is based on "semantic segmentation" and "Image-to-Image Translation".
Try it at this [website](http://118.89.27.46:5000/)!
+### Examples +![image](./imgs/hand.gif) origin | auto add mosaic | auto clean mosaic :-:|:-:|:-: ![image](./imgs/example/lena.jpg) | ![image](./imgs/example/lena_add.jpg) | ![image](./imgs/example/lena_clean.jpg) @@ -30,18 +32,21 @@ An interesting example:[Ricardo Milos to cat](https://www.bilibili.com/video/BV1 ## Run DeepMosaics You can either run DeepMosaics via a pre-built binary package, or from source.
+### Try it on the web +You can simply try removing mosaics from faces at this [website](http://118.89.27.46:5000/).<br>
### Pre-built binary package For Windows, we build a GUI version for easy testing.<br>
Download this version, and a pre-trained model via [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ)
-* [[How to use]](./docs/exe_help.md)
+* [[Help document]](./docs/exe_help.md)
+* Video tutorial => [[youtube]](https://www.youtube.com/watch?v=1kEmYawJ_vk) [[bilibili]](https://www.bilibili.com/video/BV1QK4y1a7Av) ![image](./imgs/GUI.png)
Attention:<br>
  - Requires Windows_x86_64; Windows 10 is recommended.<br>
  - Different pre-trained models are suitable for different effects. [[Introduction to pre-trained models]](./docs/pre-trained_models_introduction.md)<br>
- - Run time depends on computers performance(The current version does not support gpu, if you need to use gpu please run source).
+ - Run time depends on the computer's performance (the GPU version performs better but requires CUDA to be installed).<br>
  - If the output video cannot be played, try [potplayer](https://daumpotplayer.com/download/).<br>
  - The GUI version is updated less frequently than the source code.<br>
@@ -67,11 +72,11 @@ You can download pre_trained models and put them into './pretrained_models'.
#### Simple Example * Add Mosaic (output media will be saved in './result')<br>
```bash -python deepmosaic.py --media_path ./imgs/ruoruo.jpg --model_path ./pretrained_models/mosaic/add_face.pth --use_gpu 0 +python deepmosaic.py --media_path ./imgs/ruoruo.jpg --model_path ./pretrained_models/mosaic/add_face.pth --gpu_id 0 ``` * Clean Mosaic (output media will be saved in './result')<br>
```bash -python deepmosaic.py --media_path ./result/ruoruo_add.jpg --model_path ./pretrained_models/mosaic/clean_face_HD.pth --use_gpu 0 +python deepmosaic.py --media_path ./result/ruoruo_add.jpg --model_path ./pretrained_models/mosaic/clean_face_HD.pth --gpu_id 0 ``` #### More Parameters If you want to test other images or videos, please refer to this file.
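Note that `--gpu_id` (which replaces the old `--use_gpu` flag) is parsed as a string. A minimal sketch of how it is resolved at startup, condensed from the cores/options.py hunks earlier in this series; the helper name `resolve_gpu_id` is illustrative and not part of the patch:

```python
import os

def resolve_gpu_id(gpu_id: str) -> str:
    # Condensed from cores/options.py: '-1' selects the CPU; any other
    # value is exported so torch only sees the requested devices, with a
    # fall-back to CPU when CUDA turns out to be unavailable.
    if gpu_id != '-1':
        os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
        import torch  # imported after CUDA_VISIBLE_DEVICES is set
        if not torch.cuda.is_available():
            gpu_id = '-1'
    return gpu_id
```

A comma-separated value such as `'0,1'` is also accepted; `model_util.todevice` wraps the network in `nn.DataParallel` whenever the string holds more than one device id.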
@@ -81,5 +86,4 @@ If you want to test other images or videos, please refer to this file.
If you want to train with your own dataset, please refer to [training_with_your_own_dataset.md](./docs/training_with_your_own_dataset.md) ## Acknowledgements -This code borrows heavily from [[pytorch-CycleGAN-and-pix2pix]](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) [[Pytorch-UNet]](https://github.com/milesial/Pytorch-UNet) [[pix2pixHD]](https://github.com/NVIDIA/pix2pixHD) [[BiSeNet]](https://github.com/ooooverflow/BiSeNet). - +This code borrows heavily from [[pytorch-CycleGAN-and-pix2pix]](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) [[Pytorch-UNet]](https://github.com/milesial/Pytorch-UNet) [[pix2pixHD]](https://github.com/NVIDIA/pix2pixHD) [[BiSeNet]](https://github.com/ooooverflow/BiSeNet) [[DFDNet]](https://github.com/csxmli2016/DFDNet) [[GFRNet_pytorch_new]](https://github.com/sonack/GFRNet_pytorch_new). diff --git a/README_CN.md b/README_CN.md index de37161..090371d 100644 --- a/README_CN.md +++ b/README_CN.md @@ -1,9 +1,15 @@ -![image](./imgs/hand.gif) -# DeepMosaics -这是一个通过深度学习自动的为图片/视频添加马赛克,或消除马赛克的项目.
它基于“语义分割”以及“图像翻译”.
+
+

+ +
+ +# DeepMosaics +**[English](./README.md) | 中文**
-### 更多例子 +这是一个通过深度学习自动地为图片/视频添加马赛克,或消除马赛克的项目.
它基于“语义分割”以及“图像翻译”.
现在可以在这个[网站](http://118.89.27.46:5000/)尝试使用该项目清除马赛克!
+### 例子 +![image](./imgs/hand.gif) 原始 | 自动打码 | 自动去码 :-:|:-:|:-: ![image](./imgs/example/lena.jpg) | ![image](./imgs/example/lena_add.jpg) | ![image](./imgs/example/lena_clean.jpg) @@ -26,19 +32,20 @@ ## 如何运行 可以通过我们预编译好的二进制包或源代码运行.
- +### 在网页中运行 +打开[这个网站](http://118.89.27.46:5000/)上传照片,将获得去除马赛克后的结果,受限于当地法律,目前只支持人脸.
### 预编译的程序包 对于Windows用户,我们提供了包含GUI界面的免安装软件包.
可以通过下面两种方式进行下载: [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ)
-* [[使用教程]](./docs/exe_help_CN.md)
- +* [[帮助文档]](./docs/exe_help_CN.md)
+* [[视频教程]](https://www.bilibili.com/video/BV1QK4y1a7Av)
![image](./imgs/GUI.png)
注意事项:
- 程序的运行要求在64位Windows操作系统,我仅在Windows10运行过,其他版本暂未经过测试
- 请根据需求选择合适的预训练模型进行测试,不同的预训练模型具有不同的效果.[[预训练模型介绍]](./docs/pre-trained_models_introduction_CN.md)
- - 运行时间取决于电脑性能,对于视频文件,我们建议使用源码并在GPU上运行.
+ - 运行时间取决于电脑性能,对于视频文件,我们建议在GPU上运行.
- 如果输出的视频无法播放,这边建议您尝试[potplayer](https://daumpotplayer.com/download/).
- 相比于源码,该版本的更新将会延后. @@ -62,13 +69,13 @@ cd DeepMosaics [[预训练模型介绍]](./docs/pre-trained_models_introduction_CN.md)
#### 简单的例子 -* 为视频添加马赛克,例子中认为脸是需要打码的区域 ,可以通过切换预训练模型切换自动打码区域(输出结果将储存到 './result')
+* 为视频或照片添加马赛克,例子中认为脸是需要打码的区域,可以通过更换预训练模型来切换自动打码的区域(输出结果将储存到 './result')
```bash -python deepmosaic.py --media_path ./imgs/ruoruo.jpg --model_path ./pretrained_models/mosaic/add_face.pth --use_gpu 0 +python deepmosaic.py --media_path ./imgs/ruoruo.jpg --model_path ./pretrained_models/mosaic/add_face.pth --gpu_id 0 ``` -* 将视频中的马赛克移除,对于不同的打码物体需要使用对应的预训练模型进行马赛克消除(输出结果将储存到 './result')
+* 将视频或照片中的马赛克移除,对于不同的打码物体需要使用对应的预训练模型进行马赛克消除(输出结果将储存到 './result')
```bash -python deepmosaic.py --media_path ./result/ruoruo_add.jpg --model_path ./pretrained_models/mosaic/clean_face_HD.pth --use_gpu 0 +python deepmosaic.py --media_path ./result/ruoruo_add.jpg --model_path ./pretrained_models/mosaic/clean_face_HD.pth --gpu_id 0 ``` #### 更多的参数 如果想要测试其他的图片或视频,请参照以下文件输入参数.
@@ -78,5 +85,5 @@ python deepmosaic.py --media_path ./result/ruoruo_add.jpg --model_path ./pretrai 如果需要使用自己的数据训练模型,请参照 [training_with_your_own_dataset.md](./docs/training_with_your_own_dataset.md) ## 鸣谢 -代码大量的参考了以下项目:[[pytorch-CycleGAN-and-pix2pix]](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) [[Pytorch-UNet]](https://github.com/milesial/Pytorch-UNet) [[pix2pixHD]](https://github.com/NVIDIA/pix2pixHD) [[BiSeNet]](https://github.com/ooooverflow/BiSeNet). +代码大量的参考了以下项目:[[pytorch-CycleGAN-and-pix2pix]](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) [[Pytorch-UNet]](https://github.com/milesial/Pytorch-UNet) [[pix2pixHD]](https://github.com/NVIDIA/pix2pixHD) [[BiSeNet]](https://github.com/ooooverflow/BiSeNet) [[DFDNet]](https://github.com/csxmli2016/DFDNet) [[GFRNet_pytorch_new]](https://github.com/sonack/GFRNet_pytorch_new). diff --git a/cores/core.py b/cores/core.py index e990a21..f9cc64c 100644 --- a/cores/core.py +++ b/cores/core.py @@ -1,5 +1,6 @@ import os import time +import torch import numpy as np import cv2 @@ -30,6 +31,7 @@ def video_init(opt,path): continue_flag = True if not continue_flag: + print('Step:1/4 -- Convert video to images') util.file_init(opt) ffmpeg.video2voice(path,opt.temp_dir+'/voice_tmp.mp3',opt.start_time,opt.last_time) ffmpeg.video2image(path,opt.temp_dir+'/video2image/output_%06d.'+opt.tempimage_type,fps,opt.start_time,opt.last_time) @@ -59,7 +61,7 @@ def addmosaic_video(opt,netS): if not opt.no_preview: cv2.namedWindow('preview', cv2.WINDOW_NORMAL) - print('Find ROI location:') + print('Step:2/4 -- Find ROI location') for i,imagepath in enumerate(imagepaths,1): img = impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepath)) mask,x,y,size,area = runmodel.get_ROI_position(img,netS,opt) @@ -77,7 +79,7 @@ def addmosaic_video(opt,netS): mask_index = filt.position_medfilt(np.array(positions), 7) # add mosaic - print('Add Mosaic:') + print('Step:3/4 -- Add Mosaic:') t1 = time.time() for i,imagepath in enumerate(imagepaths,1): mask = impro.imread(os.path.join(opt.temp_dir+'/ROI_mask',imagepaths[mask_index[i-1]]),'gray') @@ -100,6 +102,7 @@ def addmosaic_video(opt,netS): print() if not opt.no_preview: cv2.destroyAllWindows() + print('Step:4/4 -- Convert images to video') ffmpeg.image2video( fps, opt.temp_dir+'/addmosaic_image/output_%06d.'+opt.tempimage_type, opt.temp_dir+'/voice_tmp.mp3', @@ -119,7 +122,7 @@ def styletransfer_video(opt,netG): path = opt.media_path positions = [] fps,imagepaths = video_init(opt,path)[:2] - print('Transfer:') + print('Step:2/4 -- Transfer') t1 = time.time() if not opt.no_preview: cv2.namedWindow('preview', cv2.WINDOW_NORMAL) @@ -142,6 +145,7 @@ def styletransfer_video(opt,netG): if not opt.no_preview: cv2.destroyAllWindows() suffix = os.path.basename(opt.model_path).replace('.pth','').replace('style_','') + print('Step:4/4 -- Convert images to video') ffmpeg.image2video( fps, opt.temp_dir+'/style_transfer/output_%06d.'+opt.tempimage_type, opt.temp_dir+'/voice_tmp.mp3', @@ -156,8 +160,7 @@ def get_mosaic_positions(opt,netM,imagepaths,savemask=True): t1 = time.time() if not opt.no_preview: cv2.namedWindow('mosaic mask', cv2.WINDOW_NORMAL) - - print('Find mosaic location:') + print('Step:2/4 -- Find mosaic location') for i,imagepath in enumerate(imagepaths,1): img_origin = impro.imread(os.path.join(opt.temp_dir+'/video2image',imagepath)) x,y,size,mask = runmodel.get_mosaic_position(img_origin,netM,opt) @@ -186,7 +189,7 @@ def cleanmosaic_img(opt,netG,netM): print('Clean Mosaic:',path) img_origin = 
impro.imread(path) x,y,size,mask = runmodel.get_mosaic_position(img_origin,netM,opt) - cv2.imwrite('./mask/'+os.path.basename(path), mask) + #cv2.imwrite('./mask/'+os.path.basename(path), mask) img_result = img_origin.copy() if size > 100 : img_mosaic = img_origin[y-size:y+size,x-size:x+size] @@ -199,6 +202,18 @@ def cleanmosaic_img(opt,netG,netM): print('Do not find mosaic') impro.imwrite(os.path.join(opt.result_dir,os.path.splitext(os.path.basename(path))[0]+'_clean.jpg'),img_result) +def cleanmosaic_img_server(opt,img_origin,netG,netM): + x,y,size,mask = runmodel.get_mosaic_position(img_origin,netM,opt) + img_result = img_origin.copy() + if size > 100 : + img_mosaic = img_origin[y-size:y+size,x-size:x+size] + if opt.traditional: + img_fake = runmodel.traditional_cleaner(img_mosaic,opt) + else: + img_fake = runmodel.run_pix2pix(img_mosaic,netG,opt) + img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather) + return img_result + def cleanmosaic_video_byframe(opt,netG,netM): path = opt.media_path fps,imagepaths = video_init(opt,path)[:2] @@ -208,7 +223,7 @@ def cleanmosaic_video_byframe(opt,netG,netM): cv2.namedWindow('clean', cv2.WINDOW_NORMAL) # clean mosaic - print('Clean Mosaic:') + print('Step:3/4 -- Clean Mosaic:') length = len(imagepaths) for i,imagepath in enumerate(imagepaths,0): x,y,size = positions[i][0],positions[i][1],positions[i][2] @@ -237,6 +252,7 @@ def cleanmosaic_video_byframe(opt,netG,netM): print() if not opt.no_preview: cv2.destroyAllWindows() + print('Step:4/4 -- Convert images to video') ffmpeg.image2video( fps, opt.temp_dir+'/replace_mosaic/output_%06d.'+opt.tempimage_type, opt.temp_dir+'/voice_tmp.mp3', @@ -260,7 +276,7 @@ def cleanmosaic_video_fusion(opt,netG,netM): cv2.namedWindow('clean', cv2.WINDOW_NORMAL) # clean mosaic - print('Clean Mosaic:') + print('Step:3/4 -- Clean Mosaic:') length = len(imagepaths) for i,imagepath in enumerate(imagepaths,0): @@ -276,7 +292,7 @@ def cleanmosaic_video_fusion(opt,netG,netM): img_origin = img_pool[LEFT_FRAME] img_result = img_origin.copy() - if size>100: + if size>50: try:#Avoid unknown errors for pos in FRAME_POS: input_stream.append(impro.resize(img_pool[pos][y-size:y+size,x-size:x+size], INPUT_SIZE)[:,:,::-1]) @@ -287,9 +303,11 @@ def cleanmosaic_video_fusion(opt,netG,netM): input_stream = np.array(input_stream).reshape(1,T,INPUT_SIZE,INPUT_SIZE,3).transpose((0,4,1,2,3)) input_stream = data.to_tensor(data.normalize(input_stream),gpu_id=opt.gpu_id) - unmosaic_pred = netG(input_stream,previous_frame) + with torch.no_grad(): + unmosaic_pred = netG(input_stream,previous_frame) img_fake = data.tensor2im(unmosaic_pred,rgb2bgr = True) previous_frame = unmosaic_pred + # previous_frame = data.tensor2im(unmosaic_pred,rgb2bgr = True) mask = cv2.imread(os.path.join(opt.temp_dir+'/mosaic_mask',imagepath),0) img_result = impro.replace_mosaic(img_origin,img_fake,mask,x,y,size,opt.no_feather) except Exception as e: @@ -309,6 +327,7 @@ def cleanmosaic_video_fusion(opt,netG,netM): print() if not opt.no_preview: cv2.destroyAllWindows() + print('Step:4/4 -- Convert images to video') ffmpeg.image2video( fps, opt.temp_dir+'/replace_mosaic/output_%06d.'+opt.tempimage_type, opt.temp_dir+'/voice_tmp.mp3', diff --git a/cores/options.py b/cores/options.py index f1d612b..bba8bae 100644 --- a/cores/options.py +++ b/cores/options.py @@ -101,6 +101,8 @@ def getparse(self, test_flag = False): self.opt.netG = 'unet_128' elif 'resnet_9blocks' in model_name: self.opt.netG = 'resnet_9blocks' + elif 'HD' in model_name and 'video' 
not in model_name: + self.opt.netG = 'HD' elif 'video' in model_name: self.opt.netG = 'video' else: @@ -116,7 +118,7 @@ def getparse(self, test_flag = False): else: self.opt.ex_mult = float(self.opt.ex_mult) - if self.opt.mosaic_position_model_path == 'auto': + if self.opt.mosaic_position_model_path == 'auto' and self.opt.mode == 'clean': _path = os.path.join(os.path.split(self.opt.model_path)[0],'mosaic_position.pth') if os.path.isfile(_path): self.opt.mosaic_position_model_path = _path diff --git a/docs/exe_help.md b/docs/exe_help.md index 98ee6a2..d5b96aa 100644 --- a/docs/exe_help.md +++ b/docs/exe_help.md @@ -1,7 +1,9 @@ ## DeepMosaics.exe Instructions -[[中文版]](./exe_help_CN.md) +**[[中文版]](./exe_help_CN.md)** This is a GUI version compiled in Windows.
Download this version and pre-trained model via [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ)
+Video tutorial => [[youtube]](https://www.youtube.com/watch?v=1kEmYawJ_vk) [[bilibili]](https://www.bilibili.com/video/BV1QK4y1a7Av)
+ Attentions:
- Require Windows_x86_64, Windows10 is better.
@@ -9,11 +11,29 @@ Attentions:
- Run time depends on computer performance.
- If output video cannot be played, you can try with [potplayer](https://daumpotplayer.com/download/).
- GUI version update slower than source.
+ +### How to install +#### CPU version +* 1.Download and install Microsoft Visual C++ + https://aka.ms/vs/16/release/vc_redist.x64.exe +#### GPU version +Only suppport NVidia GPU above gtx1060(Driver:above 460 & CUDA:11.0) +* 1.Download and install Microsoft Visual C++ + https://aka.ms/vs/16/release/vc_redist.x64.exe +* 2.Update your gpu drive to 460(or above) + https://www.nvidia.com/en-us/geforce/drivers/ +* 3.Download and install CUDA 11.0: + https://developer.nvidia.com/cuda-toolkit-archive + +You can also download them on BaiduNetdisk +https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ +Password: 1x0a + ### How to use * step 1: Choose image or video. * step 2: Choose model(Different pre-trained models are suitable for different effects) -* step3: Run program and wait. -* step4: Cheek reult in './result'. +* step 3: Run program and wait. +* step 4: Cheek reult in './result'. ### Introduction to pre-trained models * Mosaic @@ -22,10 +42,10 @@ Attentions:
| :------------------------------: | :---------------------------------------------------------: | | add_face.pth | Add mosaic to all faces in images/videos. | | clean_face_HD.pth | Clean mosaic to all faces in images/video.
(RAM > 8GB). | -| add_youknow.pth | Add mosaic to all (FBI Warning) in images/videos. | -| clean_youknow_resnet_9blocks.pth | Clean mosaic to all (FBI Warning) in images/videos. | -| clean_youknow_video.pth | Clean mosaic to all (FBI Warning) in videos. | -| clean_youknow_video_HD.pth | Clean mosaic to all (FBI Warning) in videos.
(RAM > 8GB) | +| add_youknow.pth | Add mosaic to ... in images/videos. | +| clean_youknow_resnet_9blocks.pth | Clean mosaic to ... in images/videos. | +| clean_youknow_video.pth | Clean mosaic to ... in videos. It is better for processing video mosaics | + * Style Transfer @@ -50,8 +70,8 @@ Attentions:
* 7. More options can be input. * 8. Run program. * 9. Open help file. -* 10. Sponsor our project. -* 11. Version information. +* 10. Sponsor our project. +* 11. Version information. * 12. Open the URL on github. ### Introduction to options @@ -60,7 +80,7 @@ If you need more effects, use '--option your-parameters' to enter what you need | Option | Description | Default | | :----------: | :----------------------------------------: | :-------------------------------------: | -| --use_gpu | if -1, do not use gpu | 0 | +| --gpu_id | if -1, do not use gpu | 0 | | --media_path | your videos or images path | ./imgs/ruoruo.jpg | | --mode | program running mode(auto/clean/add/style) | 'auto' | | --model_path | pretrained model path | ./pretrained_models/mosaic/add_face.pth | diff --git a/docs/exe_help_CN.md b/docs/exe_help_CN.md index a38b6e5..8ec4b06 100644 --- a/docs/exe_help_CN.md +++ b/docs/exe_help_CN.md @@ -1,18 +1,39 @@ ## DeepMosaics.exe 使用说明 下载程序以及预训练模型 [[Google Drive]](https://drive.google.com/open?id=1LTERcN33McoiztYEwBxMuRjjgxh4DEPs) [[百度云,提取码1x0a]](https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ)
+[视频教程](https://www.bilibili.com/video/BV1QK4y1a7Av)
+ 注意事项:
- - 程序的运行要求在64位Windows操作系统,我仅在Windows10运行过,其他版本暂未经过测试
+ + - 程序的运行要求在64位Windows操作系统,我们仅在Windows10运行过,其他版本暂未经过测试
- 请根据需求选择合适的预训练模型进行测试
- - 运行时间取决于电脑性能,对于视频文件,我们建议使用源码以及GPU运行
+ - 运行时间取决于电脑性能,对于视频文件,我们建议使用GPU运行
- 如果输出的视频无法播放,这边建议您尝试[potplayer](https://daumpotplayer.com/download/).
- 相比于源码,该版本的更新将会延后. +### 如何安装 +#### CPU version +* 1.下载安装 Microsoft Visual C++ + https://aka.ms/vs/16/release/vc_redist.x64.exe +#### GPU version +仅支持gtx1060及以上的NVidia显卡(要求460版本以上的驱动以及11.0版本的CUDA,注意只能是11.0) +* 1. Download and install Microsoft Visual C++: + https://aka.ms/vs/16/release/vc_redist.x64.exe +* 2. Update your GPU driver to 460 (or above): + https://www.nvidia.com/en-us/geforce/drivers/ +* 3. Download and install CUDA 11.0: + https://developer.nvidia.com/cuda-toolkit-archive + +当然这些也能在百度云上下载 +https://pan.baidu.com/s/10rN3U3zd5TmfGpO_PEShqQ +提取码: 1x0a + ### 如何使用 + * step 1: 选择需要处理的图片或视频 * step 2: 选择预训练模型(不同的预训练模型有不同的效果) -* step3: 运行程序并等待 -* step4: 查看结果(储存在result文件夹下) +* step 3: 运行程序并等待 +* step 4: 查看结果(储存在result文件夹下) ## 预训练模型说明 当前的预训练模型分为两类——添加/移除马赛克以及风格转换. @@ -23,10 +44,10 @@ | :------------------------------: | :-------------------------------------------: | | add_face.pth | 对图片或视频中的脸部打码 | | clean_face_HD.pth | 对图片或视频中的脸部去码
(要求内存 > 8GB). | -| add_youknow.pth | 对图片或视频中的十八禁内容打码 | -| clean_youknow_resnet_9blocks.pth | 对图片或视频中的十八禁内容去码 | -| clean_youknow_video.pth | 对视频中的十八禁内容去码 | -| clean_youknow_video_HD.pth | 对视频中的十八禁内容去码
(要求内存 > 8GB) | +| add_youknow.pth | 对图片或视频中的...内容打码 | +| clean_youknow_resnet_9blocks.pth | 对图片或视频中的...内容去码 | +| clean_youknow_video.pth | 对视频中的...内容去码,推荐使用带有'video'的模型去除视频中的马赛克 | + * 风格转换 @@ -52,8 +73,8 @@ * 7. 自行输入更多参数,详见下文 * 8. 运行 * 9. 打开帮助文件 -* 10. 支持我们 -* 11. 版本信息 +* 10. 支持我们 +* 11. 版本信息 * 12. 打开项目的github页面 ### 参数说明 @@ -62,7 +83,7 @@ | 选项 | 描述 | 默认 | | :----------: | :------------------------: | :-------------------------------------: | -| --use_gpu | if -1, do not use gpu | 0 | +| --gpu_id | if -1, do not use gpu | 0 | | --media_path | 需要处理的视频或者照片的路径 | ./imgs/ruoruo.jpg | | --mode | 运行模式(auto/clean/add/style) | 'auto' | | --model_path | 预训练模型的路径 | ./pretrained_models/mosaic/add_face.pth | @@ -75,7 +96,7 @@ | --mosaic_mod | 马赛克类型 -> squa_avg/ squa_random/ squa_avg_circle_edge/ rect_avg/random | squa_avg | | --mosaic_size | 马赛克大小,0则为自动 | 0 | | --mask_extend | 拓展马赛克区域 | 10 | -| --mask_threshold | 马赛克区域识别阈值 0~255 | 64 | +| --mask_threshold | 马赛克区域识别阈值 0~255,越小越容易被判断为马赛克区域 | 64 | * 去除马赛克 diff --git a/docs/options_introduction.md b/docs/options_introduction.md index 1947c97..3888eed 100644 --- a/docs/options_introduction.md +++ b/docs/options_introduction.md @@ -5,7 +5,7 @@ If you need more effects, use '--option your-parameters' to enter what you need | Option | Description | Default | | :----------: | :------------------------: | :-------------------------------------: | -| --use_gpu | if -1, do not use gpu | 0 | +| --gpu_id | if -1, do not use gpu | 0 | | --media_path | your videos or images path | ./imgs/ruoruo.jpg | | --start_time | start position of video, default is the beginning of video | '00:00:00' | | --last_time | limit the duration of the video, default is the entire video | '00:00:00' | diff --git a/docs/options_introduction_CN.md b/docs/options_introduction_CN.md index ef775a1..8557d04 100644 --- a/docs/options_introduction_CN.md +++ b/docs/options_introduction_CN.md @@ -5,7 +5,7 @@ | 选项 | 描述 | 默认 | | :----------: | :------------------------: | :-------------------------------------: | -| --use_gpu | if -1, do not use gpu | 0 | +| --gpu_id | if -1, do not use gpu | 0 | | --media_path | 需要处理的视频或者照片的路径 | ./imgs/ruoruo.jpg | | --start_time | 视频开始处理的位置,默认从头开始 | '00:00:00' | | --last_time | 处理的视频时长,默认是整个视频 | '00:00:00' | diff --git a/docs/pre-trained_models_introduction.md b/docs/pre-trained_models_introduction.md index 2c4a3f3..cc74728 100644 --- a/docs/pre-trained_models_introduction.md +++ b/docs/pre-trained_models_introduction.md @@ -10,8 +10,8 @@ Download pre-trained model via [[Google Drive]](https://drive.google.com/open?i | clean_face_HD.pth | Clean mosaic to faces in images/video.
(RAM > 8GB). | | add_youknow.pth | Add mosaic to ... in images/videos. | | clean_youknow_resnet_9blocks.pth | Clean mosaic to ... in images/videos. | -| clean_youknow_video.pth | Clean mosaic to ... in videos. | -| clean_youknow_video_HD.pth | Clean mosaic to ... in videos.
(RAM > 8GB) | +| clean_youknow_video.pth | Clean mosaic to ... in videos. It is better for processing video mosaics | + ### Style Transfer diff --git a/docs/pre-trained_models_introduction_CN.md b/docs/pre-trained_models_introduction_CN.md index 9156391..9b82e2a 100644 --- a/docs/pre-trained_models_introduction_CN.md +++ b/docs/pre-trained_models_introduction_CN.md @@ -10,8 +10,8 @@ | clean_face_HD.pth | 对图片或视频中的脸部去码
(要求内存 > 8GB). | | add_youknow.pth | 对图片或视频中的...内容打码 | | clean_youknow_resnet_9blocks.pth | 对图片或视频中的...内容去码 | -| clean_youknow_video.pth | 对视频中的...内容去码 | -| clean_youknow_video_HD.pth | 对视频中的...内容去码
(要求内存 > 8GB) | +| clean_youknow_video.pth | 对视频中的...内容去码,推荐使用带有'video'的模型去除视频中的马赛克 | + ### 风格转换 diff --git a/docs/training_with_your_own_dataset.md b/docs/training_with_your_own_dataset.md index fa36938..bc2a30c 100644 --- a/docs/training_with_your_own_dataset.md +++ b/docs/training_with_your_own_dataset.md @@ -10,7 +10,11 @@ We will make "face" as an example. If you don't have any picture, you can downlo - [Pytorch 1.0+](https://pytorch.org/) - NVIDIA GPU(with more than 6G memory) + CUDA CuDNN
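Because training expects an NVIDIA GPU with more than 6G of memory, a quick sanity check before kicking off a run can save time (standard PyTorch API, nothing project-specific):

```python
# Verify the GPU that training would use has enough memory.
import torch

assert torch.cuda.is_available(), 'training requires an NVIDIA GPU'
props = torch.cuda.get_device_properties(0)
print(props.name, round(props.total_memory / 1024 ** 3, 1), 'GB')  # want > 6 GB
```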
#### Dependencies -This code depends on opencv-python, torchvision, matplotlib available via pip install. +This code depends on opencv-python, torchvision, matplotlib, tensorboardX, and scikit-image, available via conda install. +```bash +# or +pip install -r requirements.txt +``` #### Clone this repo ```bash git clone https://github.com/HypoX64/DeepMosaics @@ -32,31 +36,31 @@ python draw_mask.py --datadir 'dir for your pictures' --savedir ../datasets/draw python get_image_from_video.py --datadir 'dir for your videos' --savedir ../datasets/video2image --fps 1 ``` ### Clean mosaic dataset -We provide several methods for generating clean mosaic datasets. However, for better effect, we recommend train a addmosaic model in a small data first and use it to automatically generate datasets in a big data.(recommend: Method 2(for image) & Method 4(for video)) +We provide several methods for generating clean mosaic datasets. However, for better results, we recommend training an addmosaic model on a small dataset first and using it to automatically generate datasets from a large one. (recommended: Method 2 (for images) & Method 4 (for videos)) -* Method 1: Use drawn mask to make pix2pix(HD) datasets(Require``` origin_image``` and ```mask```) +* Method 1: Use drawn masks to make pix2pix(HD) datasets (requires ```origin_image``` and ```mask```) ```bash python make_pix2pix_dataset.py --datadir ../datasets/draw/face --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod drawn --minsize 128 --square ``` -* Method 2: Use addmosaic model to make pix2pix(HD) datasets(Require addmosaic pre-trained model) +* Method 2: Use an addmosaic model to make pix2pix(HD) datasets (requires an addmosaic pre-trained model) ```bash python make_pix2pix_dataset.py --datadir 'dir for your pictures' --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod network --model_path ../pretrained_models/mosaic/add_face.pth --minsize 128 --square --mask_threshold 128 ``` -* Method 3: Use Irregular Masks to make pix2pix(HD) datasets(Require [Irregular Masks](https://nv-adlr.github.io/publication/partialconv-inpainting)) +* Method 3: Use Irregular Masks to make pix2pix(HD) datasets (requires [Irregular Masks](https://nv-adlr.github.io/publication/partialconv-inpainting)) ```bash python make_pix2pix_dataset.py --datadir 'dir for your pictures' --hd --outsize 512 --fold 1 --name face --savedir ../datasets/pix2pix/face --mod irregular --irrholedir ../datasets/Irregular_Holes_mask --square ``` -* Method 4: Use addmosaic model to make video datasets(Require addmosaic pre-trained model. This is better for processing video mosaics) +* Method 4: Use an addmosaic model to make video datasets (requires an addmosaic pre-trained model.
This is better for processing video mosaics) ```bash -python make_video_dataset.py --datadir 'dir for your videos' --model_path ../pretrained_models/mosaic/add_face.pth --mask_threshold 96 --savedir ../datasets/video/face +python make_video_dataset.py --model_path ../pretrained_models/mosaic/add_face.pth --gpu_id 0 --datadir 'dir for your videos' --savedir ../datasets/video/face ``` ## Training ### Add ```bash cd train/add -python train.py --use_gpu 0 --dataset ../../datasets/draw/face --savename face --loadsize 512 --finesize 360 --batchsize 16 +python train.py --gpu_id 0 --dataset ../../datasets/draw/face --savename face --loadsize 512 --finesize 360 --batchsize 16 ``` ### Clean -* For image datasets(generated by ```make_pix2pix_dataset.py```) +* For image datasets (generated by ```make_pix2pix_dataset.py```) We use [pix2pix](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix) or [pix2pixHD](https://github.com/NVIDIA/pix2pixHD) to train the model; we take pix2pixHD as an example. ```bash git clone https://github.com/NVIDIA/pix2pixHD cd pix2pixHD pip install dominate python train.py --name face --resize_or_crop resize_and_crop --loadSize 563 --fineSize 512 --label_nc 0 --no_instance --dataroot ../datasets/pix2pix/face ``` -* For video datasets(generated by ```make_video_dataset.py```) +* For video datasets (generated by ```make_video_dataset.py```) ```bash cd train/clean -python train.py --dataset ../../datasets/video/face --savename face --savefreq 100000 --gan --hd --lr 0.0002 --lambda_gan 1 --use_gpu 0 +python train.py --dataset ../../datasets/video/face --savename face --n_blocks 4 --lambda_GAN 0.01 --loadsize 286 --finesize 256 --batchsize 16 --n_layers_D 2 --num_D 3 --n_epoch 200 --gpu_id 4,5,6,7 --load_thread 16 ``` ## Testing -Put saved network to ```./pretrained_models/mosaic/``` and rename it as ```add_face.pth``` or ```clean_face_HD.pth``` or ```clean_face_video_HD.pth``` +Put the saved network into ```./pretrained_models/mosaic/```, rename it as ```add_face.pth```, ```clean_face_HD.pth``` or ```clean_face_video_HD.pth```, and then run ```deepmosaic.py --model_path ./pretrained_models/mosaic/your_model_name```
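To automate that last install-and-rename step, a small hedged helper; the source checkpoint path is an assumption, so point it at wherever your training run actually saved its weights:

```python
# Hypothetical: install a trained checkpoint under the name deepmosaic.py expects.
import os
import shutil

src = 'train/clean/checkpoints/face/latest.pth'            # assumed save location
dst = './pretrained_models/mosaic/clean_face_video_HD.pth'
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.copy(src, dst)
print('now run: python deepmosaic.py --model_path ' + dst)
```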
diff --git a/imgs/icon.jpg b/imgs/icon.jpg deleted file mode 100644 index 672f6ed3b95d94f47de1e59ad73939879497ca47..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36399 [binary image data omitted]
diff --git a/imgs/logo.ico b/imgs/logo.ico new file mode 100644 index 0000000000000000000000000000000000000000..75edca65882ea4a5a704dc11b6a28f06cc33f54e GIT binary patch literal 16958 [binary image data omitted]
diff --git a/imgs/logo.png b/imgs/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..41b8ee3473c7cd4ef58ed3f3a7b3764ff5f68afb GIT binary patch literal 22588 [binary image data omitted]
diff --git a/imgs/logo_withwords.png b/imgs/logo_withwords.png new file mode 100644 index 0000000000000000000000000000000000000000..cc3290d3a766611b06cec9374943df4d61d5cad8 GIT binary patch literal 50936 [binary image data omitted]
zRby%W?DHZUQ5cxmX4|&z{v%Ubq8l86RwMJI_XGU99}~u2%6cF&E>ZdkHw{Wqxwm3P;o!(6Gd|veA&v>(c_e+_LdV>idr%TM7@(Nq%ppBFXNQ9eFLp zSBp#cGn}C0d4fpN$_It`BBuSTMni1dKsFG>q&97&sUK2tl}Q2SOkmskvwJ`qQq)_9 zAK+!6ApJTgaNMau*$yckM6)M1P@Vr$3Gx6}nFTyJ(q`%LWjApCbM#9AtOwj+n)&6W ziw{0j+uq9ZrNlsOgH0>D8t;Q19<(Pty|fOK!seP^a-3CD=OXGYIR_k@ofgQQm?%t4ku^A| z?^*~*>K8Ay24NtvE(BJsMZFEovu_h@Hno6rR)IJNGgJIFo$kC3q-&AWkOHocZ~K{K zf!?%Z!@49sCKx)G_DMQc7%{z9CcJnVr%;%dziaNyM!aBQq zrK!m)G&j_f%C9~+5**-@`Ej05J#=TO<}j_4xm4Ys5DL<0dJb^q3tjA%sww~X)uoyT zXhD5T)f0pq%)C#EYTUiIK`GeKIWA4V`Ii$OdUHDnAr<6Ry zi*>=b!4$0D%a{8{@I90*;QT-;Js=gWDu2znm4-uO81gE2%-Za##^>x2t2a_h{3kBX zLZwG-Zbj2zE`bmt-v0Z2?D7q4Zu|*oR8i|f2~>ly(*asy>&q>1shAI`bpX~->H{{0 z->V*4p4aW$2q9=(m{mFPesrM2T4QcydqP~z9acp`XN^Ytf&0c1soSA_r36Ab%A%i% z{uV};;T**WxQ!SBakZ0`7pa=Pm7kLd8js2A>o{io)jc!GENO+@{-EU)>e@MK=xvr` z%-XRd(5G_(3UN6jpVDshT(J13L{JymY*Pa{Mg|}ON<`2@Y)bN&mm{Iz7R$2nf^szf z1^1Nh%JsDPefb{^V%7qw!at9Hh1jT|DMMWnB-z#&u6js$ZH~Yu2yT8?h$;4+9D`=H9%184OJ11Mm(!FhV+X7bs% zYZQ=|X@t+_>8e5(4X7U1HT}WaK;olBwSYQ7coo276I58HAS@>|&_YoOLC%p;pGoT4 zo5^i*)xUA!YH@?yI{zP`v8+LCiOxI}ilbZvtdB%|q8fUnzE3fT|IG^q_g*d~%GvYC9(CW6W9= zcE-Gm8_}xD#u{gcG~4FFt_F5@f(Z|#Zn79Dt8^h~XwnQsm)o!@MhWmXsxAa}C)PtMT^ArdrWu_tCQvi2Z~Lr4=(`hS4; z^6>9802A=HKCsLfy%Gjf#I0E#fcPc&havGzz9%fFy`i{9N}5*x)|)W9YIEHl+xnCEo^QOrD1OeSMB&X+U2mr zk#q{Ik&U5>>cS5#H)E9|RH5s$0tC4o3UmQC(aHsr&6L>iV!jVrFxU}oOUT%Zq*5W! zQ2fZ)_8Iq!AzmoMoT`VaT`C0C+kW3}SDFF&^6|p3pPH`2z+u!*Q?sM{HxDqnN6e@m z$L6L88mZ_$hai8ksHKC~hk(VQ^*tbPl-5?X&XiVeOGCp8$4XGSv|6PkvNT1PAEEsw z$qm`$~7BrW2#W`*b*(&g`oivRsU3Gax#mDx|W8{ zgPu^ux=-?wYJdT_J)lN#WJq}+hbjos7b)t3xchw%#H=(gRJ+)8>_~}WIllW@0ij$_ zCFGpbO>9RGZ_P1YIw%RF-BLzHJ%9_p$?^@zgRmxA0zpHEEiM$AYx!3iP=&8;ENnr3 zhJ!$*7_~D@FD|socL9oX*eB-9+5PlAloN%FzX~Dv#GR05Gwq)mjTr5%oH)%1G&%-CKDeJ`m|~j1H!T(iYQWdpvZyL zqbj!i7k0)HI-2q#?~1g4gxX6+M1`P(+@46N>E%Om)rkwgnwSW!`O3pNq)G~z{nlVq_xh&t&WKb?qAMh2Ckmw+-yU=aN*7?M& z1yD7u(WTLf9Ua>VHJ3**6@nNx?-Uw_Wwbf8dsdB(00oG;mOUU0Pp ztv@FUVo%2KyFvQ9$i}X*PwqM(um#zze8M zHans2y&J6(*UqiH!|p}ffGpcAK6Ib-509iyPx5;1g1w)eJjatK4@`RqRZhyT^6^ha zCn_KbE=A%TZOv}D|7*jb(O<4+y2_C&TGhb;RlAOTOjR`(SP5H&0 z3vs$Fx<@Y17MGRh3dj17@bA7xyOz+BP&VIR^L}>O+MO0Ok`1v`F)hXqMZ0XazSvUr z_S1VT#dVlf)Wont&*>Jm_89UsnHpE)(qA!$-+N$} zhgU-;_j<2&Hsv;tR;L8@_sqoH4`oehCW}db{Nz(vIYL|!77;$x3%uWRLf)PP2O5*- zpcm7sTRr#|RtYFeX7ZKYuQHivJ*q0}M3MsUDMsrUkMiltL{&}}5)O}-lTzh=Z(Br{ zMLKpSHs;zdGNx6r>{H$q`mi6>`q*5=cBjX4g&kayi^YU$B^tk?$AjE+jgF`FTsn0&; z)UnIMdy550@x^>e4yCz1PRXInpPQBW8`8OIWg$tu04ZbbrHiK28dFTZjdaskPiu&? 
zk6rQF4WSxU%bwo7l$F@kz6~RS295?!BNC9PL|$g2J9c>yYi$KAIfb^2+vDwvcvT&% z15p1VGCWD7He`tC&!TOPNoXDF0I8eK=1zWQo9Wbj^OhRWOCjy$HtyI z6&+lpJ}dBWP#Du{L~DIeAMef)TnM>eJ|r~V{rFoX3W@_Jq5u=w3mLWNijoP`X!^qL z7K4*>xiM>g4(B4Oj#7oMj$A@&uRBY9RW08rYyT&-RWEZ5ev%4w#_b3c^y+?T8adZNc&bZ2w)Bfn#}38yvIGzP{%d*i#gGA_SqtLvDk1 zs6h`~hBS)4(nk(+^93yQo#{-`s z!+)uCkGv=3?0VSrnu#^<*m)G)Zrg3F9rtdFXW*G}1&l%Z)XLoeT*X*lelY}4B$bYa zm&{?um|%UX6*V-GwWC<&eIK5tgw{ct&Oj3gv+P8b{dEw0yXUpNq>sXZyl z>U&^AFRIlJ(eh!6;*Y@$u;i@rp-XI;^`J7la~Hdu2gwnu^t$>Cqi#Ty^Muk zrj=N%@SpajWogfd)D|>+DMt`%os=4b^t#pkV_ct%F8-+0p3Nbx2_?-pS|LnHeDLMX zw%gs*?Eq{ykIX&{^HJ3_X~tOcMYmVaE~3b)mN30nCa_G|ac2n}lZN>ImOvssjI+m6 z^7{c$WN43Wb-~2GXf(EUOcUKL7YHX>LLgHu0kjw#kS9%Fs2B>(#@qkiCr*P#bp+}4 z*un!WTLv7Ri@*24Jv~UZ3QL2Y`plw=EpdK023{iUfrgITH&-IRuMp&_jYk=#{FWc0 z8lk!vCaOL}6Gl5nkD&+=4jUUxv$@jjS1cDRI(E8sOfGn*DG%MtA(mIvLsL&T#Oeog z&<|Z{ijj4pBOQ(HQwP!ny6Kw-pS5El;~$~tQ#IQwEr_7ZC#XU{LTY{D-rHNYxb~z7MO>B5t2a}6{uu*n?)K&BId6y--5q+; z`2OPs?YyzA;QIE)PmdPd9uihH8oXM+7rJYbi>tl&9WQt*bc8XU%4dTIun~>25ruyvp)GGk@mmt-H_HG%sI zr7@L7Vum$!3W!u4#d_kVmlK=E`UROuKbAiZs~BoJDJc-BAKxhauq=KoU!&1ol@a(5x=GsMwh5Mef7uKk zcRm}eocZ*!?P8Bd@R@r|sFtBgqrG_7pM>-` z+9_xg8lEIQjVUD0vYlnV-d(!~CDh8Sa)+oKQSxc3fPz5E5;d~IbD{B~ zYKoh@wByB{K|U{1a&N<_dLbh|--WR6#9WtJ^{!!=uRFIGz_DxLPhT&X%uy)Sjh?(z5;}wSISOFV z8tNR&-Ept!+Ue`YpmQs7lN6slE=)7)+r`aidpC+bRoj{Lk2s7}zvlaq+23~x!Z=g-P z;{fK}OL^oD7kj&lri9Oy)Y2Tj_w0_DC_SS z_9K1g@YcWfEnNDrcC9xv$nqT^N&DIFL7HB$FOdBBB;lV-s}O$k2d^EY)9{oHTC&}_k`*bM$T^LHo%az ziSh4t5!Gqo3b{4f)EZA(%hGRDW5wK>gERw+x%uo2$C1b8LmRiS!amnpmgcafcRQ^y z)`&jkMu+`BW0 zIGUAL-LKGu>w*(6VGJft6U#GMw!nCaA9NoACmL+g;Cm-5+F))Mdg;$xY`LEr>_tsE zIp;&DmZHUpk~fAG8xN!nRfeY5;jH7v+;wYivRtSOIA*bHj)j5av_KDI@h>RIdPV{7KY8u<$FSzX(H98pZr|Y6x#YIPAqzUm7>z$nN?{= zm3DRCP;VbB3BAKo+MYn1nvz@hS&oV43-x{P^^W^)Pu!tpM?!Tj`J24)wEqj~s0 zh6HO(e(B8QJ`7@Wc+eSm8RQAL#rh7bZG4yh!YA-WrVv3IU$r?y{nGaifGArA=!Os6 z_DH>rm}=Zm!%2&2>w?hr9k;X2+z;4LWtR>vJ9(jw=<{=Qo9!UELO-wyrMQ@Hn+=F@_xV4V_ahEFe;?Xsc zH{Fx7!?NEYci7Di^GiqLXJiPM75OF;?`NY_nEQN6NTGwacmUbNl=OJ2ZOU$LE7jj`ymW`+oHQaXLFYF?J;va$2b&C}G%P-`)V=kBYPS!-HJ0yC#}<(vH;+v!|NwWuJS_Fo&s4q^Lm`|TT9iq!MBDlZLdJG{)o zZu~;a!+&k_0@}v(x;Y&}gWj+Z9ov#SZQ~C5*!|4RIuP)ECKUB150Slmc?c*Aenr*M zB(Wuv?@qqr%=e_02`C?oE=kS$Z2U^yxC0Bf+&QT~5^O;wfR*#9yfZkoTpB-JxsX#Y zX_$PA>5#}t_Ax(=XDb!8~Hx}*1Uz(a0kwrN{WBU&WmNG99r&v^I6qVk_Nu^ zGgjENtL@MX1RN~+wC}dsExmhX%sixC7yfZ~$U{+%nScF#!D&qEAy^Ae4^15CHzRU0 z_p~V4e~m<--I6x>`yTV%`78REzaOueVyGxB`p|Oi_|08`N2tQb=aRQ9t+x@Z;@u}d z8V>4EJq+eF>M@9;ZLL$iyzZdei<}=DiEzWg4VsjTO)*-Ec3n=lrim*SZlzFnGeR4j zDD`{9ApSj%tkl?gnD{=+<(*~BS|fHthUPO8*G?9WWz`&7W^L&k0S)qyW42x8S!H3N zP|j~0j;K+%(td)d%$H!fGg-Ldgk%KdlZpk5xYtmq%m4!lmvt@r*`Mkfn6nk}MRQ6C zhHbj`!NvheDth6P`Y8z)+*Cotr_K_5+)a0R+!~u!ng6^V*n*WN%sud_PH~vyi=W0j zbZv8rF(+uWQFanuXZ?OduX7924@zS>hPp+!*=^#x;KCfsXd3?#Mk02-l#12XQ_jsWxzpKOAHrKY-1BeTfGijg#TOYaFIEQY-0^nlp#V ztJqL+T7MQFmr*m=`F8XQKu@(uGTPmnkUw;y6OQDeUBCKAu!w*&K5B#8GgM=rC+YFj z`DbV&6m}PY0@PxNtTBQ{4*vW%YNM**);C#4BX~{E11lfnRxCnQ+PBNJB({y1`&!4c zPGpEs9{7`%?%jZGO;il3WQwR(2cJ(DP#ufCF;2{D$U?4`=iBRuDE08iD7f^W!tn+aJ~&S4~SEZF_p%?U+4X~ z&tUEvWJ-{ySs3|L_5l-K937HUdBium$EB(e4&DNM;NwJlb-(SZxjlqr=)O%G|Boniz4HM=W=MxpWY@3mzoQ-RN<2JvEiz_LlMeiE>*a!MLwtMWJQSR zT_5^r&@g+Xko$cZ+>TdH&W3)1akX2lA!zp@Dux^Lz3Y;=K@0 zlwu~aS$^A&{;Et;Hi{F9{= zv-bP=%@cv0dOYdXPw*@*sq!HDLG@Dd@-bdP$!EG~-rDB4_9@q3&UC(U@G7XZ2{rl= zAD7vT5w!|y*YMYAXY_F7E%a_dOKpYaV{tQrKee}s)b$}%(Sd9_k!_PL{rhl>JKX1T z2f#%xE_f)q&!E80l3U9kwW&w*jfBr`sGpbPhC1A6864VW#sovB^0&(|gc97k{_B|F z^FT~@xY%+ib#12OfL3O2GpX}lzGZ|{8gc1z5*EE8soH)Z(#7}ai5dO$x+@h|b_g_@ zSu49-Bq+2vegsUfZ{+XULfMIs{zP4rS%32T-SR67*o^}Gy}CYyE0KrQVp(F;wQ!!b 
zPr9PZFY4cWKDRzON5yKM!hH2-S%SSL-R3m)qOMb`DE+if1*|919?@WKO5gsn(9c5* zSMj9ik75la#>%>;lSKa>jgaO?Wx)~}=_Fq9AydKnm2z0EQt^i|_o)V_y!L~3*| zHQ0pO*|+prGj^Cx+jaRl5V-ZH$%c6{TlQ;DYw;u^ih*zx&-&YUG7}Teoi!Pg69$Tv*qk z){!>Oub8E8eB<}9jJtUvOm01=Ew^%JT{_}G+JzRvoB?(1!T5Kv%}3H3)yUX4hN zAgQ+ZZ>+1SoFkoz8c~auGG*!S!XZ!l(qP$uU1sIc{^5Z^S0mr3wiw18964# zdFo})glP`f19M#~VI^sPT$Se@0sX=AcNc3SzOA*!>YiPb8Vu10(>*2PMwhpcKm2BVnqq1D& z>>>KB$^?1tWka!EN=4Drs=@1ZYH!u`y`99i>z~iAtM4s6+hqp1e*)Rt$s;*z+TWYC ztyg)g0Adu2>SbWds|8Jo{?f9cf@DJs68B*eoDk+I_eDQyE@AF$#{o}4y8r!XgHJcB zF8_ffdB4v2nQo&q_r{XJ@LqmeU#I4~jrEI1&*RHv=yVH?QY#e_zPNGAQdud%Kl^>K zwfK@oziqlT|JHFRRf6Nmg3D_>Kbq8A<4A22)L`RsO{WD_I0!$}191gTu38UW*>Ca_ z1E0ve^;fEw?N%i3IeM8SPb*WIR0zibqZvH}h!tK0;bfMX?{it+&N}Z;e_})Lvu}AL zHn*RQewCR$AyCvZAx;fGYc%1MVy#lw5FAim6Y+`wZD~u!y0wDAK~#^dsIvRT*cl6q zS0ws(dx*nu=u^bLVh@F1Lj|XWCW(Ic<27*|d%Mf}g){gDdXsN;>?tRoq^4v%lwdXm*pY0OhXT~q20sq9L#%? zkZerqpPySE`z-Z!+~P7J*&HLwo$_ag3U$yDtt~yjv~ErKZ0q}5q?Fgkudc3n`yw)Dtz*Y((Z3o1e?P|v=AI_><&Rd6IAr5fw|s`v?npJI zOq|ogf<%AkJ7;h{$N*R0%`v`fmP@mQ>I`V~@POv7rZ*3<%j4LMYCv)DWz%sEWBoDV z=#r`doXpt}bKq}B80SKnSovu#TnVeqg`N!YK^&abpkGF(JK*3F>IV7a6v0nZJc@^5GC9%Ga>`hOqaEOzyIq6@=v@O>%QekVn^hVR`8qENy?grNie*y>X+SC9&log1ac0t`gGc zp-^hAHvP!naRXOaWZe6&Xz__=(84EYLsIK`0N9v|BoNSbH@z;xLLURqvuq%}&hy^+n;}IxVcJ24J*#r5ZaE!>Y*C-U9&VZO2Z@ z+1rFZX|#SR{}HY`V`Q~G;#txF0nx9luTZQyHCL|3BR`-9HP4jebB!GbmX-&xDW@@~ zC&lH|3nq)v2iLLIV9(m28E^~$Up7CXK*uR=Bas!9Au{@B3SmyRW2e}sVQk&_)q|Q) zInNZ)|F8@gc*-h)1!S;CHKG@@v??kOO!X}P;nxFQ9%0hQ} zD~KRS?==>%&@?K&ih^{I7J3w=NeKedLPweyI*7FIOc4L?H_x-r?&@Ub-g8eszjNki ziI70d;KO8v`reb~3-z)ylh>8$fQWhTeR29`%XZwf*Y6`>yasV-8WvhiAA4l~GbD_k zo%|Qzx&XW@`LHZ%@FmaWy8%E3zGIKre(JU`u}uajmnIPUH#YisVPbyJ#|s_Nq)_#GOdIb6yXYH^oNcFD=~E6fn~rt zoci@as7xDcyHzX+YUugO)0!J}VPHxfDx-t|DXjNp0cur_ANiJVkjk{cCiFxOLg)}O zE3vWQ-*-bt(zay}lD9hc7{*W?E6xmV<`Q&EC8bpUg-$3z92hOYBff?g-oe=k!g>oK z(EHUc)B`^e>;X8Ma^NL~z6%vZ`GCr9cm*r2uqIk<4fB_%&d|wAg|$FR?SODoA>)zj z!3~uib`Y8s1Eb}h6)CSmm1bKBp_)wS{~Lr2LOJy1NHC+f$*j!!(zH9!J5N|CV8w+L zlN1=5CCZswa+Tza7Ifj#)L*I9I+bw z862V6xkA?0fF-Rpu$jojm34kyzsu-rcO0~*6u%=F)pv)YoWwI5K1SU9a}_{%1V9F& z*ZL8J=Bfbz%@w*|B1;HeUAdz%fH$W&19pxYb8b3vLnP7W2I94)bjTMRQ$~&UDR<+- zqNUoD6lU(;AJ7$RCWsCwWIR}y!B{`0BU+eny(2m8KdXhx^sre7o2Y?7a8uSK*cF2uKPI*hT=KARY7?rW%hoHr3VTRDJI3YRB zbyEEKs~(l63z27&Nd#6XELGT+ zVk}jys}5|Q59DNA;&Khbg*~~imZAby@fXyMYdY|D20WLu*yC2=%z$=enba&N`B$gZ)8n1N_fS9bN zW6(m;2eTsF6&w*8QB~FH)2QrURki~E>i6IpsKv+jKSC$E3M4!?7Cbd&U4 zBw;f656RGBT|}K2P#(;nQ`fh#K5MagaX`5nibq?#Uai|)U_WkZ?phi z%`U~qi(L>QpwPqJ;zhTu*2~vJ_7F8o} zACkuL4Z2Jtj9R$g0^qN5P+AkJzk?_l1%6FoHig+(M4ygiuyjx*1Z?(KzM>^PCnAqOp`gggb5Q!r352>G zJq+%Nt7sb15Df!{-vQ$-i*#l|v8Gm)4Qes2ZJFEG^GMpM4*~xJ#&tOnx^;nO_U-8$ zM*D#{Lnu5OF=_`wN97n-jYOk^`_M^ei40J=WW$B&S7=W6nUFOYS>h?knui{%Oic4l z11|({%b@YmfkZeM<%z9yfkB;C>Pm%;!+jw4JILw+!B zJ=QH!eG7dy1P*C%=z!)b@(5{EP+7KMjG{bb7I?*K;Fx0;GFk7~JC@fX*tsttCjDXQ1)D8!k@sKsIu)^|^;35X9zR@aJkj zF;s1uGXQk?@_?l!qfr)ITuK&hm`N9?a)6OEBchdwB8e#01nEe*FJ)w49q7g!do%dk zJsyCil|bDWn(YJuR1W4D5v~F#VI-`8G0F_rRS4FH(htqCdK6Sbtx-Rh;;|Ah++d#$ zu5kv+Qm28XhAe?RVJJ&wGg#^Xbi~>)0ez9;f|~^+n-Quagx08QLq)N6;y85i%>c@d zY){(?LuA7RtDwvHe_yifVGe?gJ-8SFz9gkq4>~b)Tej<=^H9&>#ejgBlE9BVgRY|M zbEZOr%0dR2fj}db&%ud-txy#mfwCGH@g1G0R?6vy|37>3{v49SkkfYeOC%nEfKtDc z011jv?xC%cXzw=;pwpSGqT)=5UJZb@51>6CljDo+_G@%OsXNkYZ>d(hjqaU z^=MD95imOx)pNqFo5c~L#LsCV0WIt?iS6wdr*nYsm!y=vGQcwkjYz4R(sihXmj(*~ z8Q?}DdhpkFA8;*0@B1c%N9C#jLXqAxz!=w;)LvA)_0Vw? 
zo4WudilXbKQTLeWxAdCfjUNK{5CdF7^l4A!=+%Rzprim79y(;(GOWn4C>p*ylJOH) zCz3ZhV+fpepklL=BSVQ?M!OJ@*n#M`(Cdw&+}4?JvE(MnJ7f@>qA)aQ@qBp+r0#Hz z8uoIp4v2C_V6Q{qMNqZ(0D#Q{08@k#fhrIi*@pAef@liJ zzJqAvKcCYd8O(O=K@%7EVSOVo3kFTF)JV#vebZcS%Rw755fvkt0p?GySi8C@hmqQA z^3)+gn5qQ77>5!idNMlDIuJ{RV>tvEb|69#SPseRGysK?+s7#^>zpsyehcu=;6D7k zE3}phzdD?M4NG~9ni^!J7}UsB+T$*NLv2VoM!A@X&+;Cc_>?@&WFxYKmoD64yeE-d?gj1sCGV)k?}mE#4^&Gu z1zwNuJf_V_-HqN)Qmt~}iBIbfJiAnX;b97b(DUI9SK+w8N_yxVx19ZtRF|-My{3}- z&RKO@?D$IMxcw7d?jO#ORsyw=b1~a~TazDqP9@aeRX{NVD@F*yNKYQx3LC&Cg7*s& ztMO{MX($^!8^k@)P4-YEwSvbY&jwl~2Rapzt6+v;l;c=nFti>4Srh?^V*~KS zLyI4UoVjg6@}GN5f(Trrcru9_!`g`&GsDMM9**-IbS>%+KC&iV?F?E8NppGwModHL z=|?KG^RXQV6x8vqv(rAx^!h$gx{xb6CL5q7OMo*#gef?5NSHIKg%`k}4s|CGVkwaM zi@gar8_3LjR+fE5e$%0PXps#BRsN9Wf%c_!o%FrKRb|b=8BYG7@^UH_2 zF0AQiwQf7kM77-#Kz;-vRHVoam2=$R#Zgy+dmIu@Sq)WooCizx4==446%;EatZ}U# zMVq+Z>@s*chyx82`%NN+d5=Kk;I~igy3ln$tz%=^F`x{{Rh>4QX0&UMl5DmGW?7@- z4|6Z9d1f}!zCPFUq^OoejoMeO2%)utXk8)Xz_vZM+j}}`u}4=OG$*F$lY9b8yOd=u zE@AIXdMeAt6YlxAbqY0n27m(?%G8cX&D}za3#f`7MX?KL+cNnJI8Gj|wn=IIO9F}d z2G^szTk^i|py#WiPA2zn&L zc1Nq{`hDv_n!h+rtpw>(QO%lDh5BrV7r@ro&qsiDtLwa3XwleuXb~UA@40BUZ8vtf z>?VP@8Huz?BkXGe9IRWjd~VtpQKRLaaT6UaGJjYCZbNjcKv1c>te*`A9e|(6H4~VX_$FEa+6ZDW@-zTuH-d6gP6xsTj6K?wtKqRW03#IY*W1Ta2}+4|71__F zGYB{C8pI6VRQ4((JW%<5z;yyN#gdq^0bprFCJVvIaGAIukO&S;qEJCcab5e0Q$qPQ zISuIT(W`>iY6ztQ`h7KW^nX9T1$874ZO1JErUM1aiY?pO=JT$t4g_nyXCrxB`kf5d zbY#h)KsRRKPpZ&FOY!Qe@~ZmGFU$eg@1$4Xrp_J%d_xr4unvA}myp(zt9du5W^uq8 zI;8PkJ?Myf>KS;xaf{P@L4=cj>`=cK^xse>)G_iZ0G@5-cU^+);mqc)g>vO+`@tUz zhSZhx@C?X&f~ASQFOkel$6cDfY+K#}iVfQC=KZ8Jt2L)qdZNKZePV~_qnfk-AE;CY zX)I^!L3x93m~6ab?c;}|p&?}vgKMC!iGj)8G9z9~NdhO%C)^iX`ox}MQuK4R^3%ea zJjiw(*G_m&Go$kq!Oc0}sA5HeAypKMdJji*>?}LTKUY-&-2rzYDQISkG7T;P*fxNl zLA1fEg+Gygl3BdAN7GThLk1KIg6u>+y-dE+f2w;>?^YC@58lGg31g`kPyHv)Z(=Pt zz8dZ%Vo()C^@J=fBqX6X;Z*U&58Kn_wqRg`qfjjlJu9g7qTVr#8uVk_(joWl$B{tQ zh%(^09B(R2{(!p>O*EZHr2lefOv6TmpMg2V@WPl9bRUE08(|y;FApBQSA@~|7g2ho zng?_ph13pBhp-h%7y(@v4_)1vseBPz;64X*QV5a0SR(}KJ8*TIfh_1CXBbPYeFY4L zc>@6rjdjpG7Ig9vs>_v2$xzMECc|)p?Qnz2?IOayYP1~$&`)sZ`nXFfJ3ujM^KsGQc7LS|$U4+?j)4p^?;t(H9qJ@%$l{*(VGv2oE=U$yiDZz=KMaOXJ}unzdjv zuYz;Mhmz|*FnuH%l>Hxa{>;LZl-0ak7p|MAE$jHELXwUg)B;oq@hdv8#&+Ej2tTfa z^Ru|aSUuxFu+V9wB(x|6tCyELDOFEPr=#;~7@c)GXumFLd_BPLLSwqvuaI2q7ZQM8 zttYyO(an?00{Y^Rv4&q5ZH0{p6#(vfK1C26k_lcRO3`Gu_3(UYUMMDN5Ra}13}a-Y zWHmHG1snxwU`#-I=l~w1d|^REqZ{dd{}%4p-A8m)-vuYiUb2p!xr#m5E;l_-b}i@` z=f)fW$ybcl>#?XS!Uru<=Y+H(S|^5*zAr}KmtJfm$BY)rfmOp)dI+negZ+b#Q{eO6 zCG3mmqzfuYqoUytD?MnZqxEhr7|4xxP>)kZd0c7-7Ja{ zl6&GI$-`^hhy~%Bn0z46X?VH(+MPEM_m6zjaR4bLAmM&0d&3NNk{71O?#r&;3-l_H zaT_dsen{+X5U!**bK=#h>}EY)_~yhN>mfUf<@2QPmBnl1g)ZFz=i~aF@U4owera7! zCS3M_Gf0CXxCO4z591mA{A+uOUjfF|u4MNJ3w@>Ymf(zgZDEgckcP5ditDMsG9KN^ znFK4;CC`+UgJRC5;}@n2`?-@(B6Zp$Ee!XP0MFd1Q1GFXo@7Ze9@C^t9{NB zG30o6RGNOWc=Vov{j)w8%UD;bTp=24uUSXvsd;X(a$f%;X>`Un#dYSSmy~L{Ewht| z`;_GhVd_je?5w6q6Qbg9gN``-ViUo0mjt?h! ziH6Nd(+UkgWRak%N%Esz8-(*4Wy6r_&(8%2v*^ za`k}blq@oB4XBfLor?YS;k_%Z$Y}K(3EYg%Fxf2=i|M9;w?&h!-nf5X*0Fb&R7UN= z2@7dJ7~D_J)M!b413!GW+J@yJ4Bbw!7#|^YetsU5_)z0ISWEpt>{4d%=T1I$WXLlXfq!WzLL&qRmIkIr(eQcNu zoJkn=u8vr>qJ5q{W>F@ZqAP8!Ba2eh6e#L(Cqat0q!e1ZwPm56vSj%wwh^7+bz(l5 zS)0G^?B}!#0-POA(iPIhqh}VM#l`DOVMWS@`o{nb=dM-Cy!I-PqqVF0`FD@9oav;lL``4!NuV1d@lg2fbCe`bDyx|1A(mvRiUnHbGGiw*jfiBYJ*A{DLHhWfyPM~$9M zyeZi|3=6<^!9>j_zrP;7RI3fnjZO)p4^2LD^zu^j^~=j%F7no^XsrAa)%K0`{K+7F z{0;KRY5H1`typ!PhSZtwlF@jPwHd~+_K=09Fb&QKy{rEXVSO&a_|BGm;rwb+p2S8}o`5zDR*? 
z1UtYqb=S^*Sh;S#J(Wx0A09i0%t6hBAYJ!dWxrw9rmWy;S9Byi1g?1(V)m`t09M?_ z99|S4b!(Yy+i}t?=Ngu}A2>T;>YD7^_R|*i{|@H?G&VWhZ%E3#bwD+ut21oi@dYu7 z06%w03{Oq{csHReYLw6;LjM=0>*gOXk?-++-=AJYf=*EUHoo^ByMx-CK^ts?Hx(DK{j-80nR&3}`4QTF*p-gG1wlF$`ch-1w(;tB zC@AyaE-hN|HD>FCK;FvKt=I=^Ru6*$DnXnsX%4;`Q&SS9T34R^>Pp7jOrTN2?UeaX4K#bNnFyDLa}0L)f*kJR&b z|85~womZ(wCY#Ee912v5$S)Zqx`LpZ5|)ZbN6~OtWG?wa%wcam(gIXW4=%Q%KH}$O zzaCFKR8$(A(AiiZ4^An}X8FK&U-{H#YRE3KoSeUhmY?18RY``D%d>15FZ`xnymV5|*+AZx>A`Bf9*Y&XC7SZ^+v%}H&_C`o?D3?VI0@}!$ zByeGRZ|*5j7<&@+dGh#ToC?7asDL>^Bt5RMc(mIACO4E_(S0`?$j_Vudb;A1%+YG+ z0kMx{f>H1|@`Vs2;eF5vfA4Br(z}Y|GLIF$s8M(m`d{wwUot5Kgd1j=+4!eP%Kp$6AxQiHPpd5>FxE4Cz}u?g)6ChT z@WGpqe+HI&A;7(m>TfD}UCeiEXmtMFHt!QWBhCZI-UX!JDpaANJ=ZCLcO`B+trc$f z9v10<{al-ZMG@wTs{u?;NnW%LqwCBrhVeh1w^EkHSLatgZG6|EI&BEhdPq|korZ3{ z-c4TkYG-H)uWCipft&{xzdTXfOT&5Bch;r0nKziB4A^JABe5c__5<{sm{XS?BKl z07z56pGoj(hlyF7X!FWv`k{!BHR0+Ep~VE~J@_0)0qp*ykX+;-=dS$77J!|rp*?je zLNTF(Z(8=>nk|C9ktR0Vl-`-+D_dSFL3MougIj4>!>d1}o`TR%X*_0Tpwhhc>q0B! zn8UHB0&{;$l}xz4zZ2SxA7?vpX3_8YxQlR9iK%lzgn4UE)MrZ=OmG||%PHaeC!ZF& zOU|hpXZV88kyHK2C*hPs`dphnz~&+!LOVxqf1w*+)A6MrLQs2N5Lz^RZV3SHp?M8u z{-FKw8gqNYI36xLI9Ba0-j{+iacD5;u%DU}f&e7`9YH~Hfp%_s0UWVj&{Ipk{@N$O zG#G;v(CYYTbw;lINtF7Q`ARog2AUnJUg@0ehn@sy-!u*mtupfSs@ zpN)^1G@nI38MHFgz@>R9TdM!G&h$jkE+>GWLaVoE4bb^JjiG#D49|t>uDTyRJM&Xt zt%V=*UXwdo9aHZRI|6XlunR$Fbi;8^!yfmXn|`M3l|UF|iHR9?ew_O1&=>j|0$3{m zNc3nu+zDZ;+}l7&$W=ksVWpqrE$U!afZbq2jaoFhw?QiOCQEL(F(kLKR?bW%CFwT_ zmN38Y-L^nIVVripq&rZuas04TQQ>{Iq6OYU_M2)karE9u4lKX& zc*)x@dBYzdMYom|cz8ZaSyr|>CWGTtMXS)_)YzjNYwjdN)#*}zh@84`{85xYITGxU zANcARkU6K0IRPSkui{4?s+8c=o9)zfhG|mgfB1m+34Ng{dcsF>P_w<;mD(Lx{&}!F zvoT@#jd0P>scr*OO6~zZ5DzkILqrYCai~wZ!=r`7eh#GDO$h>4s#L(cN7@np-XKA0 zf;Q1b@}OBZ>HJtzna)<~_HvCb+|hWHJEHurpSd~qPkoXmZd>KR3|FMK8qHM)G?lKg z&}wG;X*Cv2S>~g?&dSH7>gpwnrm%En!ge@X{B?%p68|8jJY!4H{|YcONW@Y$$DLZ$1%4%r6H`n9O7P=cVG@~(GX@wgH8pHtpr|`at zt7NsW-?v-CH?AdtHMnLrTK_Dv2T+kARxPF!%ah&>UA42PHvO<$AKDT5aQNaIaEC-u z7*bA?aygzyM#sFnC)drnK1Kj=JL87E!~Sb)Kf20-ghoq;=O#|MJ8c(MY3bgjlXFAE zHC0~nCC`JdV^&5E{9AblN+0$|CT-`hA6#Y{A3S$BlWL;k!i(={vpMwF_1Kf-Yk!}^ zr#?T>*K8bE$-lh;XG*&RCDLb42fAOWmMfTs9@NhKJ zKrgORIAqEhZ0}udY%fc`qnzlD1MSe^67OVTs+%9dWbl||c~g4frEGwCrO3kS;#<1> zEiXHTJ@QX!nR@0cF$ToXgJZ-5vNefqzV!FRR)jbSuVCgLLpX(-nJ#(t85%lKskvbU zik7>QK&Fwn72Y&BJG(LDm5^WExRhh%N7yd^i!sFEN$Vw+YWCeLCfjx|(zA|yi6Xq( zN{=Xh3WD+b!$(MHVHlzJipI0xF<)j$**j2*mJLs0me$;(ByeGK<=Y>&-W&yz%maKi z)h$dT%>lMT{F}0&hh_~{bdYR4tq8-Qet%mDF&eXKW^#ApS4N&`XRsTs<_# z`pkzwfgQ?(B{6&aeW1OA_jq{Wf>oQP9W9n}KJu`37Dk ze>A9c;&Qp?b1fL+e*le((Chihg#Dq(=kkY!v-njUDO5>`J6y6@*Tdhq<4V<>DQp6? znd`T|4mDU<3GmmBuTny+Cl-U~1}3R?;G0DMn6w}HW>F*ipE7rfAhDIUbgJ@=Pa;qx zQ-U7h!L{t!G|`NvC*V5M3Ik%l4fUiSWWyEhz;C+~$6>?ovG$*c=m57;Vh-z^rKv8? 
z-L9)H(203eTZ@dTXM@+O~M1h-)QHou6B2 zp6sVygmJp0z1KMVYv+OXLpb*gE7xz`vSkNLmu~~B3`@7&9lkQ|g`FWz)FgP)#H7og zs!@Rkx@7BQ+n#22H1Fy6ZtK2C*`i%=tQdu?x)8dXbI!?kO?e zZ^G`R{NqU&K26)c-9CkfWQ=onb`5W>Ib3Zzyz08y^bH4@65rBO9--1B*mZf7PF)-3 z-etVBEJJPW)$eW^_$7qzifvqdHTHF^^`m|(GmyNOd%-L0Wpg1UhNQlB`)b1=~Y zi-5O|!px}5D;PcE(!queKInZY8kl}q%Eez+_|88?dq5B9q;rg1ePPnNWB2QyyYQ0F zBjg%#)1`G+7G7$d4R@Z4rMq4!%m|Sy^I4j{<4h9OyS6gSYk~Xqjr-mizCA_wY{!s> zseE$Ootls}`YMfEQ%=rGs<3vZKx%{~fp1bLXnxT#MSbG(NP3yAp;$SWv!+}~#iT*b z@r8vgZ^;uWS0elTR=PTBWhXkms*ih>3({gse=Cfy79`qJm33AJjuy>7&IQ_;D)JB5D7EM_K_mj4dY6q_DW5T=qXENd+X zoJO^ER`#6q{WQ%p@k&Tn<80xThqGpHILt=7YcB8gsfmUrTtmo zQd(#4x6W3b$5Q`ZxEO6reY@A&5G$T(fm>StULAt)sfT+rv*PKzF;N5)1a$oK6lN-C zzD2OLl2G*b_qFsFY^-!g^Iag4V|U|U2Hbi!QFOT4t_Q|<-I*mN;5C8KW& zJieBvZnKk^~Ig^hKuV-;KeWKJN-(s=IGd*_;H zjys^975I!K`z~Rr)cau~XLxf;fVgpixT*iz-y2JZ?ycY5v|y_78Z?p}+rr5g`T_g4 zA84S7-l&Ji7r*Ee67jQJ}BOH7{@oVkUMAI@5y{JZSkF>l}Y z`nmWP1x3}drwV$lH6p>*!pmKotw`ZX2)JQTqOZx87X$B^SOXRBVm)nbdMep!2m#{wQG(Y(9eQ^Rk&8rJw!WRo# zvuK_r9-$`JttSru>ixbXvv6@GeA*L`k6q#YNd3-bX53QGok)dK-l~iQKXSv~krI99 zVbEOl^3wJGA1r~Yx^I~yiw7z@ot9nnAJ6vf&QEX`vU15CVIrCSzSkj z>ycbG9E+tP8s$TMKrWi#k@~K@a_q|)ES0ALLW&}f%zs7vf7N*Y?Rub|{P7Sj;^A?V zoQxx1UWR$zoU;d#=lx@q$>(_Xi@hsi zN-7AGF;fw>`!4%fAO~#a*VU@z{W3ZwogIfY&W0Cl{yLGyUph1YHdMCY(~NQX!zK8h z$op5)L)=dl_DJZEqfCk2{zi#4tzhd?lFto1n=rC4HQYv8?jGxMIe(||It_0gu=2;% z5x;5gC@(N`*idP;}Pre>&h)-5q`-JK4pvcZhirR*@z24ds&yX<@Q!{inmdE>T^s~1ds%N47NIx42I zYqWl*%Ox0oN4EZp0p|iz7}cEV+W-17d`RRY99!-&frz03Z7da!-}#RFR^<2ZSt5Bw zK2O^|U?mFQP4Y)P?$FIbaJ+S0^uGYB zs*z8uz5%7SQvb44Ixe{_S7rx{UmbJN)p%XtDSO21Z>i%BCf6&2qGqa>wvQti&VZ4s zoDhhx$GG?^Ja_!60Dm66hyqUVg7&Ea$D96ac+j#l)PXa%-YV>RZ|BF{lO$Cy-T~8q z1bHQNz%$&L!rYyCooeEi;HFMJKwGH364}^%21~THUE|34`R^T8%tkdLNZhc@;LIKQ-Q#b1^7rrFp=3AoC?mgY60OgdKHfsKK8sY{;m%JiO-hsUM=b4<22`dT z8$F;nAA*r;ItOo?9+nycoM}3Lx~uU^Vv=W}%;fcW^nF=`-ba|gKqj4{tDpc3u*4}F z=F+&X&L8f(k9Q<1P_(f!(h69S1A&Mst;n`LFIbZsE^O5 z>EHZbr$hCUl|@gTPTE!0(cVf0w!LL0QpxVYK`{D{R{v6-=PMwjJ>I!hjVNR9fua7u zwXy80jxfCRThe!o16yy9zMK_LMW)SwQ`FSWHtvBg`v})2C+lHh^V{&`;(-IxCZ>F zls`XC%-xNq0t|=MPG+9BY-Sp%1ylUxiSdcr)b5t5&+qH;9*;Rmw9OR}I%Z~=U>fF4n?@pPi?XUOn;gmJ-e#`Ob;T&9S-*K#(?38BVE zOG{hL@$sO5TE|!g$g01@KJ6aXPGa6u2#sJJgfawuSm>qO94UNEP-RB4VM2QI}4ffNj|4CczpUJ4`(G*tSw5AcEn5xKYiEUz=Nt><{z`A#>oHnCwm4abCCWZmPhQ20kGiWBjxSC%@dJ9&>gVpxo zDNel{>QLX`TH06lk#`2&26U$Au#+{5c4}-G2+JEE!ajNMgp7nhsJGfXBKPco zDJC{OCw_nP93fUuO}d13$+!NCDZHf11`mNW#_s916ESu0O%Py+J#HrW`0P}eXj!~4 zxzgxu7^< zyn-cuKC3K&&)(~(qyX!Dgz`Xp{O^^rL2KmSx}ZbObi{4ZLAN4jXR?O{k_ms*1#uKu zTZvX^yrp`pYo4>k|6acJ%TFrTZ||>ftxd=*FUu!tVa|%jeQLlQ3KqUF!{V)Nu~6$kmUp&gnZv)gOu?TMml7x&!kqvrp6e)_Gf4JB7m1xD zwu}{_;(4{`i3g4s;G-WSacFwz#($7Z13vA232n1+&X6o<-j28(gZ6L+1EUYlf~@RE z&#RO_cFiG;qfKj<`Kie{yO|kQ0RiVm@fnM$eOj2<9dKp}dmSmEF3TP6=Rn8eXO{+j zAhqF?7>FR-4g!D7s*!O=wtV#5;fuB-q8H@^q?QE17eW(WO0JfB3U>j_t)_)Z@CE!E zj1z_ijQ?~0`FFX1#}t3$0}BNY;(mYW){@_!lRz#s<`O8(zts*XcN;LEqQimxMQY9` zCabc{OAZ487s=&%Nf7Yv4P<6gyqV5H^nN825z+nS9F)GU6o4{@m%ju*#g`LkD8tvWi5e}o zaDVbMdgwY>;(ItnoK4S8t_|&;6TOSaQOU-&Aj%CCP~mk|STw(Mo4%swig7wY5>U?aR;E2~1?z2YntDUfc?hRlco(|Wbh_DdvVZ7pBVFqHZ5&z}=lG8QNAZZ;bP7t;+*8KYO|&b+?WGghW@Zv{EP zGc+xe*0O5GUINLwb>`Yrw<}8i+UD1Ve}MtTSPJ{g6&ZI&v_phdlza|9#@j36E_I=F znUL?sJJRuiH<-okTf1ngb^BZ9ec8uAdUtF{Z*y%kQ|a3$4m=0i?=R+SJM2BsUVC4) zTl8bA_xr@BJrdyNL8?Hni+VYUgSP+`LeS@}vg}ci;Fz2BObIxOTl)9>yEkEJ$^VGt zo(es_P8hf~iZOSMU(x1MhO&(@Da?^DD4xX> zcmQn9R+M?cgk%)NozN9oE^)BZOE#6TNTwtk(ojUgiiwOvHH;G@x(Y7IZ4Z7zwct5npNSgj z)0nTX(-`qz8#Y-m16w>?M5Nyy`4SK&^Hyrhk>VD~cN=~sP&=-v%hLNY48jIz{OK;ZirEIK zqUeoRAvo`#ktds5^;pwg!2q)QnC(kTLkWvM9oWnl*v%NEq;6cvv65m{H=cO7+DVO+ 
zCN03Cz(zOAaeQH-swv<%ZxG4SOXhOo?o4Lx<)KHAt;haG+b_^de8&0h&0~X~Xp|?M zY?xAP#845u;I4(E{0{m#v26p&AfHhDe(=)AcAI{yRqeP`N)V>qM*18XQe|EAv)(7N zt!u~6!!AVPwjCkI{oR}U(zXXQGG^mkuHvp@epa#bfD6FK6}0SDX$e6N4V5g=&^Ms? zgKA1$+EVM8$hi6UlKeuFP*et?v>Z2Jd0>(N9{7{{LSQEk;s#2ScKits3e+&g@4%P; z7qvFqmXrEE5nJ6{mP|<)92)7~+(6gaan~&C6b@E2gq4T(=)d!;cHDzs zKZXd(bY88i?=j>u-`Apa*Or`NW>%z_fd*uUVDD!DWGQWW92g%Q8{k+1&Cf4NB4rYj>{r5C+0!`@-bSF(VfLPZxk< z&k4wBGL9ZG4;g$$i$D6%`xrBWP2s|<6aCJXZ zdNyzGor^n55hU)0q#0(C=Q8Cp(!#@_+TyG5KJKpRFv%&w^owJ z0^jH$Tf6O8xM;ocX!&6S>}2>}yQ!%i0k$!m^t`hR*Yod!8UAnyix}B}4%DTW2upC+ z1r{93jsbbxa)NdD14tdvLvsZD1#x$?!eJU#(vuu@{60P!KI)@q6Vrwj=*>kdd#{Eo zgCN>XkO1EwL{Jcli08(enReuWSHv}#$>L`U8O8PnF#0r#h-k0Vu_JXs?{6Y2Tcw)3Kw_<=0KJ}$Hd)fw#_%CqpZ?V_z^hl_p+b2yl|M61wPFf*Z9W?$Pd zxXyypyK|rxbFzq+X_9rqRT6!cZI218?&5Mq?doH~rl`frb)!G259y)B#qv+&%1VP- zcq(`Wv!Lpcgxl7Qu+eAdClRXU^RDW&f{-g&T>gQ=009uj=mfC1L2Kl zq=P?cPP93vq)l~8;W`zS^OB!9qye)7vEUHA#w!H^C6Vy7y0Qg)PqD{& z@9U<1kP*!}wF(thW9c9$^nUWZdh~g*bw6Mj&`bOPSZsMHvN7Rh?x>WG^d0L2S!9<< zVh$u&4@qW!o{?U zV}DYX$nuk=o!y5VE17Kf1+{;L5^^=@!h>VVSn;!TGrtUfrlN!CwWx-sH}Ai#M$*?U z85nTCvlT(g`%05uR@43zIf=&L;xXBVaH2)#*`mG&c9we(e?^VgZ(5|V-4-hMrWBy^ zjkm;|a#v8y@Aw;$Oj5@fdBADinO#3E#zwb+tM76Xz&-o%Q2X4T@ewQ6{c506N(pjh z_o1i;)uLuB@}bXZ!mlf_UMoRBdaetDi%?SLE|M!!HmVvC=A8c)ct_XW9g`ombR4%- z@Q@#&_@J+8UO*e*LrLCdbDFbj^0dZ~wXe0Sx1hHN#~Lm66d|ZoRJ8B;{UvW`=!IYu^D$)k6CL9|hSjs1nrp}fNIc~!Eo&bR zx74jIDK%JAJAJr1y=8xhoi}HJWW0sIj~Sar{aA&1geVU->ZWbKQy{3J)$gwY zX~YGn(SSFhe)iXEj!j7K=7oz&n_YWM{^ah#Nu*u@n8iNXVKFjX(!_Gz16@i})4-(!r5ExX`d9y&40#6_N^b5U zH3eX94UN|-FhJ2)ZI^kHmDxT0BYjfzh*GX*&nC8wP)!{+tq56wVmtaPBihZ6yMZ(A z8iT!L@yWbj1FrK4`bctH?J8}o%QV47x(ZYVy?J{gBnR%8TG&>ck_~T{eh~*Xs=&U| z>e@{O)*Y3RZ|_#1Mp32)CKlg1MtlsY6Y*k*Fr1~U zv@``ck2_5?&^&Eeow+Nfd`#z_s@1BQ6M4Yh*m47`=4N`t@&40D`BL}KIXeF>i0%{*4v?09Q(Vm-%QQX0ar|yU+3shl@yNm%o9pe;#nh`%p-2i`w=K z3T$K|sG+)p(_>^}aLiAO8TX=!EOVKe<)#>T?v`{~zmzW|gHCLxHhP~1la9aZlkLo3 zDk~-&+2zlBeA6<7E_=vOSoG{ItrpnWe4}w+u|2AS$ko(WBtH8{$k@dinNmJAw@NmnsKscukvOXH4KVZDKrf zpr>$+Z1=D-ds?YMesbM>LO9@~=AX65)^6n@92P*+NsG6uLAwL-z zE>JEb5Uvt_FBOS>355lmyQcHFYwP5~Cr`!5Eno0bj1JTA>>CS5A_cWD;}&EyfrZGa z;A2+9^my0-yVQD+@*n6acG$#KtzDxXV^I|qld|ss=7eTFKZ7hn8jhm_i2J04i9#h? z@8S3zV;TJgzS4HEP#GVsS7NNVMsGhQmD}~@CJyRAW*&{K6QmyW9S6@B@)SrdfTrv! 
z?=8Rou=-*ZG9xNCRA^vllSHmEG`2}jVkg2@QN+}`wC{@J~m)}t$wz( zwNj8dg<6sGo2Ty6iGg!}wS&^2YGR2EwC#~NTv2PVp2@MP?|3K)Ad>&SXzkO+B5%Pg ziEXeL3#hxKy`p`-2R%9hrAsH*CYln7vWqlzE@m7*x=ZlF5AJ&q6(6nivtyU^!oMqR zwO;D^k)tI)f3Z!?sV?&VlKDq`J4!ha=F9!1Npc$p*sE7SFluOdtgqdFm%vf&(@OuW z$*^-aA3($}r*iOSAFjUPsDM%<5G+c4ZKvsPiNW87C7rSR^(J4%G+r{v;TRuHT?V!y+Qq~ zPV!zI$i)fHqlgnYAJa{GCyD|{<2p9_J!K2v%@9p#n63PfR2FGwBC0~@{(olk*1aoUtO z5@SS}smE)wuR&M+OhKzvmofg2LHZx)=huF4i@u=>DKfT!!%u%dP!rs!7IxW3K;ACo z#ICl5AEVKr1P1bHcHe}pXJhb%Bn}?O` zr8na^c*D@O-dZROv`MRtS@>o=#B69S0sCkDCUcz?idobz@R@K1A2ip1XCAPj9i4EF zl>Z!T*}u|3}z#~15Je7%j*=`jUNg3!(A%a^V3as;$R-c zdy=}qt9{r7M@oimj|2aP$Wnda+wX_G=2Fgx7SD4*cz*Zy85naNX#cRx@6Z+kz^%uS z&3O^$6l-6}{mT+s^KJ6rrBEzHTyCdvNPfzXaz~ps(Tnma`JcKLcJ~_#-}M|k122~U z3z;DwA8Wa04!|0qm=wx4+LUvtK=nz15kf1o{;;0v7!m|ZQhskVc7NALjA{@!<~8hm zL%oVSp>_duf~ss1bEGr9Roh|Zii1AO2liKx71>kxF4NbfcwkGeEc}m~(PcYz;D72D zs2oTtz>du!MI!w&h55`zKAX1)I!73fgNTul^mIB}=pkAtdV4*~PyGP?myHVKfFVpE zXe2XBLow+RRGD3bpsn^+6)=`3YwtN_`8ajVg9pD)2XQz6CSXy~q3dDtIp zN3u*0M7TD16gB|%AM_Q}uqc$gG2%0AC=}@=F`wb;wNga4kgb8~eQn`L)B&T1E4t#0 z@POgG)$x`Y2?AorZDf)ByWs8jAeX@ed|3bmF_rP#&9yox^uT99`agSM#7J-jT%fv- zhys$R;sb4JMuJg~n*0ck?amDazwPKp0onZi!25evdWMg}3cSF1%IVwEd`9ZFaXiN) zBOOwt0h9?^Jf~{Ir-6XhJ#XbQS5nA4Ql*HsNwB8X-=H*uUAeqbQ5R25BN*)iBL`J= zq>OcHCbl&V`Ws+3w1|)3=~f_20Ssk3FHzOyo#|NiA(qFVs>(ByXp1 zwEa;>B3_q|0M~Ch(PG@>G)ByN`{V>3Wh0fnc=h{YX~L=3^Oqi%dc9{Je?8Z({t(}_ zR23!3JmT-JTB79~Ia)(5V=n&IiDqyZQf*WV8@T#y%b^5255UszgB+?qO1LipS(3|} z@rNe^3gh3rBp@a3iBR9H>Z?g)%5Y4FdiG%bhduQy;c#H0HWsD_$`wP!A!E1^U?HB6 zh-HQV>*tjhCOF6P-4fj0Hr>wSd|LKwc6)6Vb%Rj+I_qrZOzVd(VrvJ7OPW2)>lK!|^kRj%AsbMkFzJ*y;9Jf80G>wb)z~)*{8+ z^;QsN9%>N^QHml^a9IR-qdViK52oNxMQ0&u4{bk2k(@D3NwX3awD^Em2xX&-1(;<93nrb^6lzO(n&DP13im7 zEeuLdgcM>ez>>J-_~QlYp&F!Q@=w;0j{HX zYhEsFUf;d@9i-Yh@q*4OHs{-dATq6p5dr+w4oSg5l1I}DU#j{_O%Uj)MZwu2ISSH` zfldyR+-(84omQ*fMCfucJGgjK*24fc)D^Jd%Q{#Xt~I}u?&un&-{S8*v-x$I$$dR^ zlXl(U1Z)2zmqSZ>SOVI9O(ZO7t@x|n=v8d9MzBT8#=BQa-;OcPkORld$}s?Zcem%p zKEGY(F{c91y9p+q6>tbc5?$DBJ`%pRI-)O%_<%gg0`~ZjdRs>It`g#N)~7V+q#(7D z53b=oOhc3)#^jzCtk3+PU+y}z%fDeKWUC5!`8~B3b&4;lqXKlg*`4&KJgG4VrH&fdgcfV;p`pzH}(0rpb{I@ImoQLs zmD%9NShr1=vud6VX0R||_n{=>WvzJS!l#z~F8(Ykr)^{A+~WDsisR-|Z^yMcXsB-&`zG+rmMYx&e)$dEk6mNnm_{sBSXed}WT7Dcj4(NXpC)Gat7)=) zK)VNvaDoZy0FNFp#xNWiZA*%hS%e%HbSR%Zn=R6nEa-b>vj_y(^)D7+Ju5e#>SIvy z0RNcxIS}10g9~6x=~mbva;&QCQDijCcL-CDB~Xv7sC&wZjuA9!KQ%;4pZ}h-;}qIc z{_@azy5Lvj%7rd!aj*?zU0NLT*c+4}oBZzS7xCsbJKqr2|K5X%si1*sXwY_+P>RRa z@lq9ld_^qpn0EreFk_geSPsxZL-o0ZgCDiOjt^^S7F2tV*u!EC#5gAc>tItO66qq| zCMvpy_{2~I?kfR()3;PW&id9UMHs^AZ0ZTS@(WN3DV?ApOLH6vnq^qpFst7D zEExH4PxhJSMz&TL4bu8%XWgpcBdLB<&mqH4?9EvU|DbNDQpNLe>33n5OwP+}ms7h- zD+-n#jH7hV8Lk{8i>M_#$j6O`9ex7L?2K4zlHpk$txv(_YX~6U653LW2CQmf%1Z zRu><5n3oz+0!Ipe%}8K50IPxFJ4Sa9!mB1R&j3oWxz{vwRx^5XZY$vkI#SeC#nkrJSK%?E)W^qV z$FYSO2)V_k!tJ@rL)BPX?J&d(SMy4jY`2ctv{;f|;>qfYKOet8)LStA{K_Uhi@rx^ z3luO)GVZaWEHvaIN0xCksRmLe>=PLRv9{P7nh^N`U9$h6AV1u+l>ItiG~d9|IaHiv zS7yJik8l!>Gl9Ag(v{#bOFP}85v zJgXlSX^cDwnWx&tskwJbZ?d?Z+$$RV%9_zw!xr|70N(rj!AJp7FC`UWGji}eU@mj;?(}7Y>>Y7Ro%CjeOJcy1C=FId z9DbV}8O=GWI6k#5%d|B;2^d?F+v?r!MCnPAsbJFEU)JQi?RqT)hZiB$e)G#_0x6Yv z)2rk5-EFlcqJE1|UkNEu2FXm(P9FpR4^6SSQ`s_@L@*`AvadKoO%2x_;RIR!?&P1s_n z^!&ld`xn4Wo&-aMKDY<(l$i=251AX~QNKUi$7bvzRj>XyW z(x1jv4&I=cNWEe{vhM&F%)TVIYXwo$s{(pIE;Z(tK;dloqgMla(E@-~KE^ZSY4Woh zH&x|OcL8#l`qxnsxl+kaVm0R!@l@JKs^r6J9@)P@-FyGbUUq$u)jz^jv>o6{?rH-G zYY&74GFM`J=(Cka#B_mkY(kK#UuAYuKV(11i|IY0 z`@BGFKq~vdd;L>|qiIF3ZRnp9HLQ^|jV-SqgadP;yZ__DDHYl~?GVDyuOAICjoZL& 
zko9z}RcJnMj~BLPxAIl*0ke$CuL_>W#=FGR`C5{U0KR)=u>WLJJsBMf8toU;vezlOJSg3&Mew*U9Z@<}A#=HO0H+}R2axfg$7-${q$YF3h)I8c+8DYgyqnjjdEH!|SKU3?B%WgEZ2Z+s3Ug&NsR5 zhwg|CU*N%P(cJJ$C2}V95)_X>M`G}X) zEx@Wk7A%^azb?N=u4=Wv@M*66=a+%^Y%;~*H>Xlub4)QW=#=KhN=IC@OVinoY*Rg& z$G(8n=C2&#OU`V-5zXev$~D3xZitzGJu6VoQm__*_!9vVgZ7?Yj{OC9ihD2K z!QT%>1pVAuec(EO(FK`IpFkP z_$SJ^%1%Uo`sXRl0;whW=qsH$G8l~jGd8EA`@nD~l7G7Nw?+vm*e3^KdII9;Ege`6 zWskRbXoBjVPVIXUx~!^=+)1Am0V$ze#cRL z-Cqw6fEqR0P=p}RD&`$pY~8#Sa{n>_3jzw&i)h`KV3QkZtz>3bJrogh+RMJ;I?NIp zzRK1|&u;AXDWL9Z@i|5)f;)-$22Sqy*VluV_=3T-31Mi&MrPw6u7jf5nBzau#$w?5&*f%m)wvEdy5$v``Gv;9dxK<#m;N_y5D z`FP!8-cMTzU#FQ=i2Xwu2v8o2#$~-OggWIG;VusYtm)l5tAwYPa+>_2sKe<=m?YgQo;X9_x13-_xCJ{Og!WTtI53R@EixE zlex!_x*7`wda;gg*Q*MwJ>uc^}u2O=!JAMgm8SvdtPChfWym(wykUIHCNxk2W zYyJ1UKHCx3E(=}Id>y#rca{4RO>-^O%`1X$Uho`rb|ltdZjN#uG#l}rbF>6r2tIZy z*+x{gF2&m+%iFI{yL{e%*Xw*`PoC*qZp8(Jn_uX{T>I>f4- zqrA%?gchJzH9GcN;jL|o=02p%`k=F4a`&-s?^;b-bnAZGtb0p%tfW;KQXLX*;B${j zh|^Rt0J?axyQMY=doIc{FMmOw}g>4ZhAXf7q?VsE6im z`Wm1J(@zFl!)R6WcOV{^<6mY%m@c$!!Li8MkNJCB8SQ}x_rq~+24nei@UbrNrJ z)Vim0w5G}H8GW&;Z2nWLRZkI02lQGvcLSa>8F|&L@>+9V8e5LQl<@Ei_J8pe7eto_``pTbvc?0IF{rk&>qhD$K z&li{n|969Mm;?m{+@VsHXFqF*a2y^CPHl>xD--;Y549p6_)A3;D<=8NtccX?QYDPr zd^)g)z|2n(C2w<2?v&IHNing+<&$@$`YL)C#_Wj4B~4lXc}=QGj;KaO``57l h@4mJF(I8$fDw?xdM_%KGA|=0faNp6r1y&wc{|i)s$?yOG literal 0 HcmV?d00001 diff --git a/make_datasets/csv/stars_name.csv b/make_datasets/csv/stars_name.csv deleted file mode 100644 index 4c0015b..0000000 --- a/make_datasets/csv/stars_name.csv +++ /dev/null @@ -1 +0,0 @@ -邓紫棋,范玮琪,赫子铭,邵美琪,李易峰,尤浩然,张丰毅,李宗翰,范冰冰,朱亚文,彭丹,蔡琴,杨幂,刘嘉玲,童蕾,张丹峰,Angelababy,闫妮,热依扎,柯受良,唐嫣,张静初,林忆莲,濮存昕,刘诗诗,莫小棋,潘粤明,贾晓晨,周星驰,毛俊杰,万茜,隋俊波,李小璐,赵文卓,潘长江,毛孩,柳岩,应采儿,郑秀文,吕丽萍,周杰伦,聂远,萧淑慎,安雅萍,赵丽颖,毛晓彤,宣萱,黄小柔,张柏芝,张智尧,何美钿,涂黎曼,赵本山,黄秋生,张铁林,印小天,刘德华,唐一菲,于明加,张可颐,姚笛,干露露,李光洁,任容萱,高圆圆,杨丞琳,陈慧琳,余少群,谢霆锋,利智,左小青,徐怀钰,林志颖,六小龄童,郑元畅,朱媛媛,李菲儿,梁朝伟,苗侨伟,赵宝刚,吴镇宇,贾青,吴亚馨,楼南光,林志玲,郑伊健,曾志伟,吴京安,钟汉良,金城武,侯梦莎,王传一,孙俪,黎姿,吴君如,刘洲成,戚薇,张惠妹,周韵,丁小芹,林心如,言承旭,植敬雯,许冠文,房祖名,邱泽,叶玉卿,莫少聪,佟丽娅,张曼玉,辰亦儒,李李仁,吴奇隆,蓝洁瑛,陈百强,田海蓉,郭碧婷,邹兆龙,薛佳凝,戚美珍,李连杰,蒋勤勤,谷智鑫,吴廷烨,汤唯,吴绮莉,朱梓骁,丁春诚,马伊琍,钟丽缇,马景涛,张博宇,陆毅,张嘉倪,狄龙,马浚伟,周润发,金世佳,何超仪,冯宝宝,陈乔恩,杨恭如,周渝民,王思平,吴尊,卓依婷,朱一龙,龙飘飘,陈妍希,欧弟,张雨生,张雅蓓,陈冠希,陈建州,陶慧敏,官晶华,杜汶泽,梁家辉,吕颂贤,衣珊,王祖贤,袁咏仪,张译,鲍国安,伊能静,钟嘉欣,袁洁莹,罗乐林,张学友,李依晓,杨童舒,曹炳琨,罗志祥,杨乐乐,李心洁,姜昕言,赵又廷,刘若英,秦沛,朱铁,王祖蓝,李嘉欣,隋棠,丁嘉丽,吴秀波,颜丹晨,李诗韵,谭耀文,马天宇,莫文蔚,郑则仕,金玉婷,张耀扬,元彪,贺军翔,涂松岩,娄艺潇,徐熙娣,斯琴高丽,萧正楠,蔡依林,杨玏,王君平,王一楠,林青霞,张含韵,程愫,唐笑,彭于晏,蒋劲夫,王茜华,程琤,王丽坤,王若涵,王小利,狄莺,孙红雷,袁弘,卓文萱,郑俊弘,甄子丹,安志杰,冯淬帆,刘畊宏,李小冉,袁立,汤镇业,傅明宪,徐熙媛,余男,闫学晶,吴建飞,马苏,薛凯琪,于小伟,周秀娜,张俪,梁小龙,林子祥,沈卓盈,林依晨,庾澄庆,郭晋安,赵文瑄,巨兴茂,刘晓洁,徐帆,黄子扬,徐若瑄,曾沛慈,董骠,夏一瑶,叶一茜,沈佳妮,陈展鹏,刘思言,张歆艺,宋丹丹,蔡康永,赖琳恩,释小龙,潘玮柏,蒲巴甲,杨钫涵,汪东城,斓曦,杨佑宁,辛柏青,吴映洁,焦恩俊,陈法蓉,颜仟汶,王珞丹,马雅舒,王力可,陆树铭,张智霖,吕一,天心,崔漫莉,关之琳,阮经天,迟重瑞,邬倩倩,江疏影,翁美玲,曾恺玹,寺唯宏正,舒淇,王学兵,刘蓓,丁子峻,张嘉译,钱小豪,王雅捷,邵兵,颖儿,黄磊,郑浩南,王若心,陈意涵,林熙蕾,于荣光,钮承泽,安以轩,林雅诗,小小彬,刘劲,李佳航,蔡少芬,刘品言,王一淼,霍思燕,何润东,张震岳,邢佳栋,徐静蕾,张凯丽,温兆伦,利昂霖,吴京,童瑶,林依轮,陈彦妃,蔡卓妍,徐正曦,何家劲,黎耀祥,吴彦祖,姚星彤,江祖平,谢金燕,赵雅芝,姚芊羽,李保田,叶蕴仪,田朴珺,欧阳震华,马少骅,张艾嘉,尹天照,潘霜霜,韩雨芹,吴家丽,张韶涵,唐禹哲,戚迹,毛舜筠,梅艳芳,侯佩岑,陶晶莹,班嘉佳,贾静雯,钟楚红,邬君梅,谢君豪,俞飞鸿,余文乐,周晓涵,谭松韵,张卫健,关悦,蓝盈莹,曾宝仪,炎亚纶,秦海璐,唐诗咏,方中信,李亚鹏,许冠杰,张文慈,马蹄露,谭咏麟,高露,杨一展,苑琼丹,苏有朋,钟镇涛,徐洪浩,王子子,周海媚,万梓良,吕良伟,杨泽霖,陈若仪,汤加丽,杨明娜,张晨光,陈数,陈志朋,张涵予,赵子惠,郭富城,茹萍,徐熙颜,李幼斌,陈怡蓉,郭珍霓,曹达华,田蕊妮,寇振海,姜大卫,赵子琪,赵传,曾之乔,薛芷伦,寇乃馨,李宇春,张靓颖,周笔畅,何洁,刘亦菲,陈好,尚雯婕,张筱雨,韩雪,孙菲菲,陈紫函,朱雅琼,江一燕,厉娜,许飞,胡灵,郝菲尔,刘力扬,reborn,章子怡,谭维维,魏佳庆,张亚飞,李旭丹,孙艺心,巩贺,艾梦萌,闰妮,王蓉,汤芳,牛萌萌,赵薇,周迅,金莎,纪敏佳,黄雅莉,阿桑,董卿,金铭,徐行,朱妍,夏颖,陈西贝,冯家妹,高娅媛,林爽,郑靖文,陶虹,黄奕,董洁,巩俐,于娜,孟广美,Gameapple,美女奉奉,小龙女彤彤,张子萱,果子,丁贝莉,吸血芭比,公交MM,香香,段思思,二月丫头,刘羽琦,dodolook,拉拉公主,沈丽君,周璟馨,丁叮,谢雅雯,陈嘉琪,宋琳,郭慧敏,卢洁云,佘曼妮,黄景,马艳丽,蒋雯丽,宁静,许晴,瞿颖,张延,闵春晓,蔡飞雨,邓莎,白冰,程媛媛,吴婷,殷叶子,朱伟珊,孙菂,赵梦恬,龚洁,许晚秋,杨舒婷,乔维怡,王海珍,易慧,谢雨欣,陈娟红,舒畅,曹颖,李冰冰,王艳,沈星,阿朵,周洁,杨林,李霞,陈自瑶,李湘,金巧巧,梅婷,刘涛,安又琪,
杨钰莹,马伊俐,陈红,鲍蕾,牛莉,胡可,龚蓓苾,田震,吕燕,王姬,苗圃,李欣汝,王小丫,秦岚,彭心怡,邓婕,眉佳,李媛媛,刘晓庆,杨若兮,黄圣依,林熙,斯琴格日乐,宋祖英,郝蕾,乐珈彤,冯婧,盖丽丽,杨澜,沈冰,王宇婕,王希维,姜培琳,何晴,焦媛,白灵,胡静,陈冲,刘怡君,韦唯,龚雪,周彦宏,刘丹,傅艺伟,谢东娜,黑鸭子,周璇,杨欣,陈小艺,伍宇娟,苏瑾,李玲玉,潘虹,沈丹萍,岳红,赵静怡,宋晓英,梁咏琪,李纹,林嘉欣,周丽淇,潘伟柏,梁静茹,周慧敏,杨千桦,林俊杰,孙燕姿,杜雯惠,郑少秋,柯有伦,麦兆辉,林咏伦,苏友朋,李志刚,杜德伟,方力申,刘日曦,刘雅丽,陈炜,孙佳君,崔健邦,陈秀雯,郭霭明,樊亦敏,黄倩,张小娴,袁文杰,连凯,刘伟强,许志安,彭嘉丽,区文诗,汤盈盈,侯湘婷,张慧仪,梁韵蕊,陈颖研,李婉华,许鞍华,向海岚,丘凯敏,刘晓彤,文颂娴,刘心悠,廖安丽,胡彦斌,黄耀明,伦永亮,夏萍,陈芷菁,朱茵,关德辉,韩君婷,张曼伶,林国斌,黄智贤,彭健新,陈浩民,廖碧儿,胡林,梁绰妍,邓蔼霖,八两金,陈文媛,赵学而,林保怡,黄宗泽,黄泽锋,周国丰,李国豪,李小龙,梁家仁,邝美云,蔡一智,卢巧音,车淑梅,梁芷珊,刘钖明,李若彤,郑裕玲,林莉,彭羚,洪欣,徐子淇,叶童,施念慈,嘉碧仪,李心媚,陈加玲,蔡卓研,司徒瑞祈,应昌佑,杨采妮,畲诗曼,李思欣,陈绮贞,杨雪仪,江芷妮,陈采岚,林一峰,潘芝莉,欧阳妙芝,黄凯芹,锺丽缇,活己岚,欧倩怡,姚嘉妮,李玟,蒋怡,马小灵,陈宝珠,缪骞人,吴家乐,罗启新,陈敏婷,王心凌,胡凯欣,刘松仁,陈晓东,李修贤,夏韶声,王杰,胡大为,吴大维,陶吉吉,卢戴维,曾建明,黄子华,吴浩康,何韵诗,叶德娴,麦子豪,张国洪,李浩林,郭政鸿,李迪文,夏健龙,关浩扬,陈海恒,李逸朗,张伟文,唐剑康,朱洁仪,陈嘉熙,陈奕迅,沈傲君,张兆辉,吴国敬,梁汉文,苏志威,姚莹莹,何绮玲,姚子羚,孙泳恩,江美仪,陈洁灵,李美凤,江希文,廖隽嘉,陈奕斯,官恩娜,陈嘉桦,蔡雪敏,陈雅伦,卢凯彤,蔡子健,邓肇欣,萧亚轩,倪晨曦,林二汶,周华健,卢淑仪,关宝慧,黄伊汶,张锦程,周国贤,葛民辉,巫启贤,孙耀威,伍家廉,郭伟亮,李天翔,李敏,袁弥明,关咏荷,陈嘉容,麦包,许慧欣,陈法拉,王菲,黄日华,活希儿,袁彩云,陈慧珊,张天爱,郭少芸,叶丽仪,陈勋奇,李进,海俊杰,罗嘉良,唐奕聪,林苑,锺沛枝,黄淑仪,杨其龙,锺欣桐,陈素莹,利嘉儿,陈嘉上,叶佩雯,李克勤,谭小环,徐濠萦,刘恺威,田馥甄,朱凯婷,欧海伦,马海伦,谭玉瑛,陈玉莲,吕怡慧,温碧霞,黄泆潼,梁佩瑚,陈逸宁,梁洛施,寇鸿萍,王菀之,成龙,陈晓琪,王树熹,吴宗宪,张洁莲,吴倩莲,梁咏琳,关心妍,黄沾,林海峰,李嘉文,卫兰,马诗慧,马阅,张咏恩,曾仕贤,朱永棠,梁琤,房祖明,张信哲,刘镇伟,陈汉诗,甄妮,张洪量,林晓峰,李汉文,卫诗,詹瑞文,少爷占,马楚成,谷祖琳,岑宝儿,唐丽球,陈佩珊,马伟豪,倪震,马德钟,郑敬基,陈任,容祖儿,邓建明,梁荣忠,万绮雯,蔡慧敏,吴宇森,姜戴维,杜琪峰,邓兆尊,颜国梁,阮德锵,李思捷,邓一君,陈苑琪,蔡依琳,李中希,陈小春,萧芳芳,陈彦行,李乐诗,郑欣宜,盖鸣辉,滕丽名,高丽虹,夏利奥,陈琪,麦浚龙,侧田,汤宝如,吴雨霏,郁礼贤,徐子珊,周汶锜,梁佩诗,谢安琪,蔡枫华,朱孝天,吴克群,王合喜,李静怡,郭可盈,马国明,陈启泰,卢惠光,锺镇涛,关智斌,郑嘉颖,林子瑄,商天娥,陈洁仪,麦洁文,阮小仪,王家敏,梁雪湄,周俊伟,吴启华,郑丹瑞,李亚男,唐宁,古巨基,詹志文,黎明,张国荣,陈慧明,何俐恩,李莉莉,康子妮,王馨平,锺嘉欣,汪明荃,郭静,李丽珍,朱玲玲,罗莉萨,古天乐,阮兆祥,樊少皇,张继聪,苏玉华,卢冠廷,盖世宝,沈殿霞,陈美诗,刘思惠,傅佩嘉,蒋雅文,曹敏莉,王秀琳,霍纹希,陈曼娜,许秋怡,魏骏杰,曾华倩,陈妙瑛,锺丽淇,原子鏸,雷颂德,郭耀明,陈霁平,肥妈,刘家聪,张玛莉,韩马利,邹凯光,高钧贤,范晓萱,许美静,罗美薇,吴婉芳,邝文珣,赖雅妍,吴美珩,谢天华,王敏德,周文健,梁继璋,唐文龙,陶大宇,王光良,杨紫琼,叶璇,郑雪儿,米雪,伏明霞,张佳佳,朱健钧,杨爱谨,罗敏庄,朱咪咪,杨千嬅,卢敏仪,董敏莉,陈法容,陈豪,胡杏儿,陈松伶,兰茜,薛家燕,胡定欣,陈百祥,黄纪莹,吴文忻,沈玉薇,谭凯欣,张燊悦,张家辉,林中定,周丽琪,郑文雅,吴佩慈,胡蓓蔚,黄柏高,刘彩玉,刘绰琪,邓建泓,雷有晖,黄贯中,锺保罗,方保罗,谢伟俊,徐小凤,陈宝莲,杨宝玲,任葆琳,陈可辛,黎彼得,文彼得,蓝奕邦,陈国邦,刘美君,朱慧敏,陈莉敏,黄婉伶,傅天颖,郭芯其,陈建颖,林峰,黄百鸣,蔡济文,曹永廉,许同恩,杨洛婷,蔡一杰,戴梦梦,范振锋,许冠英,林振强,任贤齐,韦绮珊,郑健乐,梁奕伦,罗文,吴卓羲,郑中基,于仁泰,黄婉佩,周永恒,姚乐碧,黄卓玲,蔡洁雯,叶蒨文,李璨森,陈宇琛,洪金宝,陈键锋,森美,林青峰,杨英伟,邰正宵,刘永健,林姗姗,任家萱,李丽蕊,王卉,刘青云,周影,谢宁,刘婉君,张敏,陈敏芝,郭秀云,陈淑兰,邓萃雯,邓上文,姚乐怡,张玉珊,关淑怡,杨思琦,张晓,关秀媚,任达华,雷宇扬,伍思凯,梁思浩,李嘉慧,郭羡妮,唐季礼,黄湘怡,冯德伦,颜福伟,陈志云,车沅沅,黎明诗,郑融,王从希,邓丽欣,黄家强,张致恒,黎瑞莲,陈秀茹,陈锦鸿,杨怡,傅颖,李绮红,李茏怡,林敏俐,谢宛婷,洪天明,何宝生,陈德彰,林祖辉,梁靖琪,阮民安,叶翠翠,黄宇诗,周嘉玲,范逸臣,邓颖芝,吴建豪,梁慧嘉,邹琛玮,陈司翰,唐韦琪,蔡淇俊,谷德昭,王浩信,王贤志,陈德容,李明慧,黎瑞恩,骆乐,梁小冰,杨秀惠,雷凯欣,锺汉良,翁慧德,莫华伦,韩毓霞,刘浩龙,苏永康,孔庆翔,陈自强,林颖娴,张咏妍,刘小慧,俞琤,杨婉仪,梁敏仪,黄伟文,唐尧麟,张茵,甄楚倩,蒙嘉慧,郑希怡,刘文娟,谭凯琪,苍井空,川滨奈美,堤莎也加,町田梨乃,二阶堂仁美,饭岛爱,饭田夏帆,饭冢友子,芳本叶月,冈崎结由,冈田丽奈,高木萌美,高田礼子,高原流美,宫本真美,宫岛司,古都光,光月夜也,河村亚季子,河井梨绪,黑崎扇菜,红月流奈,华歌恋,吉川萌,及川奈央,吉川真奈美,吉崎纱南,吉野莎莉,今井明日香,今木翔子,金泽蓝子,进藤玲菜,井上可奈,久保美希,酒井未希,臼井利奈,菊池丽香,菊池英里,菊池智子,橘真央,具志坚阳子,可爱亚织沙,葵小夏,蓝山南,兰望美,里见奈奈子,里美奈奈子,里美由梨香,立花丽华,立木爱,凉白舞,铃川玲理,铃江纹奈,铃木麻奈美,芦屋瞳,麻川美绪,麻生叶子,美里霞,美崎凉香,美雪沙织,美月莲,明日香,木谷麻耶,奈奈见沙织,内藤花苗,内田理沙,鲇川亚美,片濑亚纪,平山朝香,前原优树,前原佑子,浅见伽椰,浅井理,青木琳,青木玲,青野诗织,青羽未来,青沼知朝,秋本玲子,秋菜里子,秋元优奈,如月可怜,若林树里,若月树里,森下理音,纱月结花,杉浦清香,杉山亚来,山下由美子,杉原凉子,上原留华,神城千佳,神崎麻衣,神崎麻子,矢吹丽,手束桃,树本凉子,水城凛,水野朋美,水野茜,水越丽子,四季彩香寺田弥生,松浦梦,松浦唯,松田千夏,松下爱来,松下可怜,松元静香,速水真保,藤彩香,藤代流花,藤崎秋,藤森智子,天衣美津,田村麻衣,望月瞳,舞岛美织,午后野弥生,西泽友里,夏美舞,相川未希,相户爱,相田由美,小仓杏,小川流果,筱宫知世,小栗杏菜,小森美王,小室优奈,小野由佳,筱原凉,小泽菜穗,小泽玛丽亚,筱冢真树子,星爱丽斯,星崎瞳,星野绫香,星野洋子,星野真弥,徐若樱,雪乃小春,岩下美季,遥优衣,野宫美忧,野原奈津美,叶月千穗,伊东美华,一色丽矢,一色鲇美,一条沙希,乙伊沙也加,樱井沙也加,由树莉莉,有川真生,有吉奈生子,有森玲香,雨宫优衣,原千寻,原史奈,原田春奈,远野麻耶,月野静玖,早纪麻未,早乙女舞,泽舞音,长濑爱,长月亚美,真木亚里沙,真山润,中川珠代,中村理央,中根由真,中山美玖,中原绫,仲井美帆,竹田树理,佐伯美奈,佐佐木,幸田梨纱,北原爱子,成膳任,戴文青木,德永千奈美,笛木优子,福原爱,高见美香,高树玛丽亚,宫崎葵,观月雏乃,海江田纯子,后藤理莎,后藤香南子,矶山沙耶香,矶山沙也加,吉冈美穗,吉泽瞳,加纳则子,加藤小雪,菅谷梨沙子,结城翼,井上和香,井上熏,酒井瑛里,久纱野水萌,铃木爱理,玲木美生,泷泽乃南,美依旗由美,木下亚由美,前田知惠,前原爱,浅田真央,清水佐纪,入江纱绫,三尺真奈美,三宅尚子,森下千里,上原绫,石村舞波,矢田亚希子,市川由衣,市井纱耶香
,嗣永桃子,松岛菜菜子,松居彩,松元莉绪,樋口真未,细川直美,夏烧雅,相乐纪子,小川熏,小林惠美,小野奈美,小泽真珠,星野亚希,须藤茉麻,亚纪奈,岩田小百合,伊藤步,优香,友崎玲,中泽裕子,佐藤麻纱,安藤沙耶,奥山唯子,白崎令于,柏木奈纯,板谷佑,滨田翔子,朝雾唯,川崎爱,大和抚子,大西由梨香,岛田百合花,二宫优,绀野舞子,岗原厚子,高鸠阳子,古河由摩,谷田未央,河合绫纯美,和久井辛,和希沙,黑田美礼,横仓里奈,后藤亚维梨,户田惠梨香,吉濑美智子,加藤麻依,江纱绫,井上诗织,井上优香,井真理绘,堀井美月,莲沼民子,柳明日,落合玲奈,牧濑奈美,木下亚由,奈良沙绪理,浅丘南,秋本那夜,秋山玲子,秋庭比吕子,三尺真奈,三井保奈美,森下真理,山吹美花,山口纱弥加,杉里香,神代弓子,树梨沙,水谷利加,松鸠永里奈,松山麻美,松屿初音,塔山直美,藤香南子,天使美树,天野理惠,田崎由希,桐岛淳子,尾崎美果,西野美绪,相泽优香,小林里实,小早川洋子,叶山小姬,樱树露衣,樱田佳子,永井绘理香,远藤真纪,早川美波,早川桃香,折原琴,中鸠广香,中泽夏木,竹野内丰,佐藤江梨花,爱内萌,爱田露美,爱田毛毛,安倍夏实,安原真美,奥山香,八木泽,白川美奈美,白鸟智惠子,白亚朱里,北山静香,北原步,北原真裕,仓本安奈,仓田杏里,朝比奈真理,朝仓加穗里,朝仓玛丽亚,持田茜,冲田由加里,川奈由依,大友梨奈,岛田香奈,堤沙也加,渡边香,风间舞,风见京子,福山洋子,冈本夏生,高仓梨奈,高野瞳,宫本瞳,宫本阳子,宫地奈,宫间沙布子,工藤瞳,宫下杏菜,河田纯子,和希结衣,横山千枝,华美月,姬野莉梦,吉田友里,吉野碧,菅野美寿纪,江口美贵,结衣美沙,金城美丽,井上彩菜,井上雅,鸠村熏,酒井美幸,菊池则江,君岛美香,可爱亚芝莎,来栖敦子,蓝原夕妃,蛯原舞,立花优,立原贵美,恋野恋,铃木由美,麻生岬,麻生真美子,麻田子,茂森亚弓,美波志保,木内亚吉拉,内田京香,品田由依,平山绫,前岛美步,前田亚纪,浅见怜,浅野子,青木美里,清木裕子,青山遥,青山叶子,三濑真美子,三崎真绪,三上夕希,三尾安齐,森宏子,森山子,森野雫,山口理惠,山口美纪,山口萌,杉山圭,杉田惠美,山田誉子,杉原爱砂,上原亚也加,神崎美树,神田美沙纪,圣瑛麻,石川恩惠,石川瞳,石川优季奈,矢吹凉子,矢吹麻理奈,矢择优子,水城梓,水希遥,松岛奈奈子,松纱良,速水怜,藤井彩,藤崎弭代,田山真美子,田中梨子,田中美久,瞳理欧,望月七,望月沙香,望月英子,武田真治,夏结玲,相纪美,相乐晴子,相仁泽美,相原里奈,翔巴辉,小池亚弭,小峰由衣,筱宫庆子,小田凉子,小向杏奈,小野濑,幸田李梨,岩崎千鹤,野宫凛子,野野由利加,叶山路易,一宫理绘,伊藤美沙纪,一条小百合,樱庭凉子,永井流奈,优木里绪奈,优木美纱,羽田夕夏,原惠美子,远山雪乃,远藤梨奈,早濑理沙,早乙女香织,长谷川,真纯麻子,织原奈美,柊丽子,中条佳奈子,中野美奈,仲村桃,足立美,佐藤有纪,八木亚希子,朝比奈英里,朝仓仁美,朝仓香乃,朝仓遥,朝美光,朝美绘乃香,朝丘瞳,朝霞佳绘美,赤西凉,川野亚希子,大久保玲,饭干惠子,福光千穗,冈田安希,高以亚希子,和久井由菜,吉木纯菜,吉泽京子,井上华菜,君野梦,堀切子,楠麻耶,南使香,平丸久美子,青木友梨,仁乃庆子,三浦富美子,山本京子,榊彩弥,矢野显子,水野亚美,水泽千夏,太田优,藤森子,相崎琴音,相泽纪美,星美织,杨原京子,早川濑里奈,斋藤朝子,长泽筑实,中谷香织,中森子,中条美华,竹内爱,庄司爱,佐藤春菜,佐藤子,津野田熏,吉井玲奈,阿嘉奈津,安藤希,安田奈央,奥川希美,奥山美夏,白鸟美由纪,百合香,北村夕起,北山英里,朝仓志穗,朝丘南,朝丘纱智,朝日美穗,朝永真弥,朝长真弥,纯名梨沙,村田洋子,大林典子,二瓶有香,芳贺优里雅,妃今日子,福美香织,冈本多绪,刚野凤子,高桥由美子,桂木萌,河合梓,横须贺,吉成香,吉村优,臼井里绘,橘友贺,来栖凉子,濑户美贵子,濑雅子,林绘里,玲樱奈,美月由奈,梦野玛丽亚,秘叉华,木内美步,木尾原真弓,牧野泉,鲇川直美,清水理惠子,萩原舞,泉由香,三浦桃,松树梨,松下真依子,松元伊代,藤宫知世,田村茜,田真潮,桐岛惠理香,梶原真弓,西尾佑里,相泽朝海,相泽沙赖,小阪光,小仓艾莉丝,筱峰爱,野乃原,优里香,由津希,泽绪凛,长曾我部蓉子,真莉亚,真崎麻衣,仲根佳绘美,爱原千芙,绫波优,奥菜千春,奥菜翼,八木原麻优,白川沙也加,白石枫,白石麻梨子,宝来美雪,北原雪,宝生琉璃,草剃纯,长濑茜,赤阪梨乃,赤阪美步,大路惠美,岛津讶子,德井唯,儿岛香绪里,福泽京子,宫泽璃音,吉野美穗,橘琉璃,濑户准,濑名凉子,片濑梨音,齐藤美穗,枪田彩野,桥本美步,三笑明日香,上原绘里香,石井淳笑,松冈理穗,松井夏穗,松元亚璃沙,唐泽美树,小池绘美子,小泉琉美,小山涉,小野谷实穗,星野琉海,续丽子,岩崎美穗,泽山凉子,辰巳奈都子,热田久美,姬野香,榎本瑞穗,榎本瑞怆,榎木加奈子,星野真唯,八木泽莉央,八木泽景一,柏木瞳一郎,坂下千里子,保坂拓见,北原一咲,泷北智子,夏本加奈子,端本千奈美,爱本瑞穗,本树尤真,仓内安奈,小鹿纯子,长坂仁惠,赤坂丽,赤坂七惠,赤咲伶奈,川村美咲,春日咲衣,镰田奈津美,风见里穗,富田梨惠,高坂真由,宫咲志帆,光咲玲奈,黑坂真美,胡桃泽马里奈,吉成香子,吉田亚咲,今井优,井坂绘美,久留须由美,蓝田美丰,笠木彩花,丰岛真千子,铃木美生,吉田亚纪子,瑠川淳子,美咲沙耶,美咲亚弥,美咲亚沙,梦咲亚由,乃木真利子,乃坂绘美,佐藤和沙,金子纱香,片濑梨子,中岛千晶,浅野京子,吉泽有希子,濑间幸美,金子美铃,三田爱里,三田友穂,三咲真绪,咲小雪,沙耶香,相田纱耶香,杉田美园,侍山凉子,山崎亚美,山咲萌,山咲千里,山咲舞,山咲亚香里,山咲一步,长濑美优,长濑美华,樱井美优,坂上友香,神乐坂惠,神田朱未,神田美咲,小岛圣,泽诗奈奈子,石川施恩惠,石坂伊津佳,水咲凉子,水咲亚里美,水野理纱,松坂庆子,松坂纱良,松坂树梨,滩坂舞,藤井树,齐藤小雪,藤咲彩美,藤咲理香,白石美树,品川幸菜,吉崎渚,田代沙织,田岛美美,泽田悠里,田中玲娜,田坂菜月,田坂仁美,舞坂由衣,香坂仁见,咲田葵,薰樱子,冴岛奈绪,野坂惠美,野坂奈津美,观月亚里沙,伊吹美奈裳,音咲洵,真锅薰,冈真里子,向井真理子,松坂季实子,椎名英姬,佐仓真衣,前田亚季,坂本冬美,坂本绘,蔡妍,裴涩琪,全慧彬,宝儿,张英兰,韩彩英尹恩惠,宋慧乔,李多海,成宥利,金泰熙,金喜善,李恩珠,韩佳人,亚由美,蔡琳,bada,张力尹(chinese),李贞贤,崔智友,全智贤,张娜拉,李孝利,梁美京,文根英,林秀晶,李英爱,金静华,张瑞希,林允儿,宋允儿李秀景,郑柔美,郑多彬,简美妍,金善雅,韩智慧,韩惠珍,南相美,黄静茵,金泰妍,郑丽媛,金荷娜,刘荷娜,河利秀,孙艺珍,徐智英,何智媛,崔秀英,stephanie天舞,金贤珠,李美妍,金雅中,朴善英郑秀妍,金智秀,皇甫惠静,韩智敏,秋瓷炫,dana,朴恩惠,韩孝珠,黄美英,金正恩,申敏儿,孔孝真,金素妍,权侑莉,禹喜珍,徐珠贤,韩艺瑟,李真,高雅拉,崔明吉,李智贤,李英雅,尹海英,林智慧,李妍喜,朴喜本,甄美里,seeya,申智(高耀太),李沇熹,金孝渊,金美淑,洪秀儿,金慧成,宋智孝,李瑶媛,朴贞雅,沈恩珍,lina上美,babyvox,崔真实,秋素英,李秀英,sunday智声,jewelry,金度完,申爱,朴信惠,金敏贞,李银珠,金南珠朴志胤,李智雅,姜恩菲,南圭丽,李青儿,高斗心,白智英,朴秀珍,朴艺珍,裴斗娜,闵先艺,赵静林,李太兰,金芝荷,李素妍,河莉秀,宣美,韩恩贞,金允慧,高恩雅,韩惠淑,沈银河,高贤贞崔茹真,李娜英,赵允熙,金莎朗,姜晶花,严正花,朴嘉熙,朴智运,闵孝琳,李宝英,玉珠铉,朴美宣,李惠淑,黄宝拉,朴幼林,吴妍秀,李姬珍,全度妍,徐智慧,李美淑,明世彬,韩艺仁,金南智,姜受延秀爱,李贤智,沈惠珍,赵贞恩,黄秀贞,钱忍和,申恩庆,洪秀贤,车贤静,张熙珍,金敏善,鲜于银淑,李荷娜,金泫雅,金孝珍,艺智苑,廉晶雅,孙泰英,上美lina,郑彩恩,贤真,金智慧,张熙真,朴素美,张真英,高小英,姜惠贞,金允珍,申爱罗,秋尚美,金喜爱,秋相美,车裕利,洪莉娜,金宝美,宋善美,李爱静,姜成妍,yuri,金惠秀,金敏喜,李清儿,郑在英,慧英,吴允儿,朴诗恩朴美善,苏幼真,李海英,崔贞润,韩银贞,金香奇,金孝真,崔志友,朴莎朗,金丽娜,孔孝珍,文素利,金圭莉,池秀媛,徐
信爱,郑秀美,李敏贞,林贞恩,宋允雅,韩高恩,金贤雅,尹珍熙,崔允英,金贞兰,许伊在,韩爱莉,闵智慧,李惠英,金善儿,尹智慧,宋孝智,蔡徐坤,陈立农,范丞丞,黄明昊,林彦俊,朱正廷,王子异,王琳凯,尤长靖,毕雯珺,李希侃,黄新淳,李权哲,丁泽仁,秦奋,韩沐伯,徐鹤尼,左叶,罗正,陆定昊,董又霖,董岩磊,钱正昊,韩雍杰,木子洋,灵超,岳岳,卜凡,朱均天,朱均一,朱一文,张晨宇,应智越,万宇贤,吕晨瑜,宋微子,何东东,李长庚,张艺凡,李若天,邓烺怡,靖佩瑶,周腾阳,杨羿,张奕轩,姜京佐,许凯皓,凌崎,周彦辰,朱星杰,Rapen,Glgel,张昕,王宥辰,陈斯琪,于洁,武连杰,徐圣恩,张达源,陈名豪,王梓豪,金逸涵,甘俊,明鹏,侯浩然,余明君,姜祎,娄滋博,胡致邦,高茂桐,叶泓希,伽里,罗杰,李志杰,林浩楷,孙凡杰,于斌,何嘉庚,孙浩然,张晏恺,李俊毅,谭俊毅,黄书豪,陈义夫,闵喆祥,李让,周锐,郑瑞彬,林超泽,赵凌峰,赵俞澈,邱治谐,梁辉,杨非同,李汶翰,胡春杨,胡文煊,林陌,陈宥维,陈涛,李宗霖,嘉羿,邓泽鸣,卡斯柏,杨朝阳,邓超元,王喆,车慧轩,蒙恩,连淮维,夏瀚宇,姚明明,师铭泽,姜圣民,李之繁,管烁,易烊千玺,王俊凯,王源,丁程鑫,马嘉祺,张真源,敖子逸,李天泽,陈玺达,宋亚轩,刘耀文,贺俊霖,刘昊然,吴磊,郑凯,鹿晗,陈赫,李晨,邓超,包贝尔,王宝强,张翰,白敬亭,魏大勋,邓伦,汪苏泷,许嵩,徐良,张艺兴,道枝骏佑,片寄凉太,山崎贤人,黄子韬,吴世勋,边伯贤,朴灿烈,金钟大,金钟仁,都暻秀,金钟国,王嘉尔,刘宪华,杜海涛,沈腾,何炅,李维嘉,薛之谦,杨洋,华晨宇,纪凌尘,陈翔,车银尤,南赫柱,王力宏,胡先煦,马可,任嘉伦,李荣浩,艾芙杰尼,邢昭林,林更新,华少翌,黄晓明,韩寒,韩庚,韩磊,海岩,海清,海鸣威,胡军,胡海泉,胡歌,吴亦凡,张国立,唐国强,姜文,葛优,黄渤,陈坤,张一山,王传君,于和伟,刘欢,林志炫,徐峥,金志文,蒋昌建,品冠,张伟,杜飞,雷佳音,欧豪,高云翔,刘奕君,佟大为,郭京飞,张鲁一,王凯,霍建华,张钧甯,林子闳,马振桓,宋芸桦,邵雨薇,任言恺,欧阳娜娜,宇宙,连晨翔,瑞莎,昆凌,杜天皓,芮娜,苏笠汶,李玉玺,李威,明道,梁又琳,游智翔,寇世勋,雨婷,沈建宏,姚蜜,萧敬腾,向以丞,黄心娣,柯佳嬿,殷悦,邵崇柏,余晋,高以翔,黄郁婷,谢依霖,潘仪君,苏见信,张熙恩,石知田,柯宇纶,寇家瑞,陈威翰,柯家豪,康茵茵,康康,柯雅馨,王耀庆,王大陆,郭采洁,文雨非,蔡芷纭,陈奕,简远信,文淇,李凯馨,陈佩骐,黄维德,张轩睿,蔡颐榛,徐洁儿,王雅慧,陈柏融,王陈怡娴,邱胜翊,陈楚河,戴君竹,Teddy,锦荣,黄伟晋,张震,金士杰,吴中天,陈柏霖,郭雪芙,张庭,凤小岳,方妍心,林柏宏,欧阳妮妮,李鸿其,谢翔雅,杜妍,刘德凯,江语晨,安钧璨,李立群,白歆惠,陈薇,黄柏钧,许名杰,田中千绘,郑靓歆,那维勋,程席榆,纪欣妤,胡因梦,田丽,何海东,林佑威,陈怡真,陈盈燕,郭品超,黄文豪,小甜甜,吴若瑄,邱心志,林宥嘉,安心亚,韩忠羽,倪安东,修杰楷,常铖,禾浩辰,王轶玲,陈德修,倪齐民,易柏辰,丁文琪,李又麟,官鸿,洪小铃,王以纶,李程彬,屈中恒,王阳明,王棠云,葛蕾,郝劭文,邱昊奇,李天柱,狄志杰,陈亦飞,林韦君,温升豪,桂纶镁,张皓明,郑开元,黄少祺,陈敬宣,何恋慈,潘迎紫,阿本,方芳,张立昂,庄濠全,归亚蕾,王思懿,李毓芬,杜姸,张天霖,许玮甯,包小柏,关颖,苟芸慧,简廷芮,杨丽菁,陈庭妮,陈匡怡,魏蔓,张玉嬿,谢欣颖,陈博正,徐嘉苇,邓美恩,陈艾熙,郭书瑶,高凌风,谢和弦,李铨,徐贵樱,许富翔,张榕容,陈玺安,赖煜哲,金燕玲,亚里,连静雯,张国柱,许雅钧,白梓轩,杨世瀚,刘以豪,戴立忍,谢祖武,张孝全,夏若妍,唐国忠,陈乃荣,易恩,秦杨,王宥胜,蔡淑臻,高捷,小煜,刘乐妍,王维琳,刘瑞琪,张睿家,林逸欣,曾佩瑜,郭思辰,安娜,杜孟竹,吴可熙,陈汉典,黄瀞怡,蔡黄汝,叶全真,安晨芯,秦汉,纪亚文,吴心缇,陈尚泽,曲澔浚,李依瑾,陈艾琳,陈语安,纪培慧,立威廉,猪哥亮,杨青倩,陈建豪,是元介,刘冠毅,谢盈萱,郭鑫,孙克杰,张勋杰,张静懿,陈庭萱,俞小凡,詹子晴,郑暐达,风田,赖宗赢,曹华恩,张行,蔡旻佑,张景岚,江宏恩,孟庭丽,宋新妮,刘奕儿,王诗安,海狗,宋达民,吕孔维,蔡维泽,杨谨华,周凯文,卓毓彤,朱主爱,庹宗华,姚凤凤,刘国劭,孙兴,王心嫚,田家达,夏台凤,姚元浩,萧蔷,席惟伦,陈羽凡,陈昭荣,周子瑜,金超群,谢坤达,傅雷,姬天语,费翔,阿喜,周丹莉,李国超,沈彦廷,沈海蓉,曾子余,潘慧如,李志希,黄牛,黄丽玲,陶嫚曼,蓝心湄,纪言恺,刘俊纬,黄立行,方志友,吴思贤,宫以腾,包小松,徐佳莹,杨千霈,陈景炀,胡玮杰,龙隆,李运庆,关诗敏,刘泯廷,许韶洋,谢语恩,施羽,王家梁,谭艾珍,王彩桦,龚继安,艾伟,邓丽君,徐敏,钱韦杉,吴辰君,杨贵媚,安东尼,王灿,林若亚,Selina,张瑀希,周绍栋,刘汉强,古斌,阿雅,郭源元,贺一航,吕雪凤,唐治平,林美秀,莫允雯,李元太,游艾迪,安哲,张佩华,夏靖庭,唐德惠,侯彦西,李罗,周俐葳,陈冠霖,龚海铭,张芯瑜,许雅涵,刘芳芸,BY2,蔡健雅,李维维,李沛旭,许光汉,简嫚书,陈为民,李国毅,林妍柔,涂世旻,林予晞,杨升达,辰烨,李佳颖,周孝安,林秀君,秦风,邓九云,谢雅琳,舒子晨,翁卡特,夏语心,傅传杰,黄腾浩,胡睿儿,陈若萍,张雁名,阿信,李妍憬,翁滋蔓,大飞,江淑娜,蓝正龙,林筱筠,梁如瑄,阿Ken,盛鉴,赵擎,素珠,郑家榆,郑人硕,李康宜,林芯蕾,周丹薇,杨琪,安佑德,黄裕翔,周咏轩,蔡昌宪,钟欣凌,安程希,张立威,郎祖筠,刘谦,司徒颖霜,丁当,陈武康,喻虹渊,王者心,卫子云,迪丽热巴,成毅,陈学冬,安悦溪,姜妍,杨紫,郑爽,关晓彤,何明翰,徐梵溪,李沁,宋轶,乔欣,王鸥,古力娜扎,张馨予,麦迪娜,张铭恩,张檬,吴优,尹正,罗云熙,陈瑶,侯明昊,蒋欣,张云龙,杨烁,胡冰卿,靳东,牟星,袁姗姗,张彬彬,罗晋,秦俊杰,赵圆瑗,唐艺昕,李纯,梁振伦,吴倩,米露,王子文,TFBOYS,陈晓,张若昀,晁然,陈梦希,乔任梁,孙佳雨,李宏毅,林秋楠,茅子俊,唐娜,赖雨蒙,马思纯,于朦胧,徐璐,宋茜,杨菲洋,阚清子,孙耀琦,张萌,田倚凡,徐海乔,王妍之,戴向宇,刘芊含,熊梓淇,蔡文静,杨蓉,王珊,李心艾,何杜娟,王洋,井柏然,张雪迎,赵韩樱子,曹曦月,姜潮,孙怡,周冬雨,米勒,焦俊艳,杨子姗,逯恣祯,李溪芮,刘美含,刘萌萌,景甜,李一桐,杨旭文,梁洁,柳希龙,郑业成,李晟,陈小纭,黄礼格,林允,徐冬冬,宋威龙,袁冰妍,付辛博,沈梦辰,陈钰琪,方文强,鄂博,阎汶宣,牛骏峰,李小萌,白雪,张哲瀚,鞠婧祎,吕佳容,张浩,施诗,刘敏涛,王亭文,范世錡,海陆,马薇,邢菲,郭俊辰,孔垂楠,施予斐,穆婷婷,盛一伦,任天野,杨舒,刘庭羽,严屹宽,金晨,王晓晨,李砚,尹淇,韩栋,康杰,韩东君,于晓光,陈洁,张维娜,杨之楹,郑恺,蒋蕊泽,林源,张芷溪,苏青,蓝波,杨志刚,白百何,李佳奇,晏紫东,李健,高伟光,李进荣,张子枫,孙骁骁,米热,于震,宋小宝,刘芮麟,宋祖儿,李兰迪,康磊,叶祖新,于毅,蒋依依,李林娟,刘波,胡一天,石悦安鑫,娄清,刘娜萍,秦语,刘楚恬,王千源,阮圣文,孙雪宁,李倩,祝绪丹,母其弥雅,刘雅瑟,应昊茗,刘启恒,李梦,宋妍霏,金瀚,尹智玄,虞朗,袁志博,刘敏,任重,吴昊宸,孙艺洲,刘笑歌,毛林林,何泓姗,李郁瞳,袁泉,宋佳,马蜀君,尤靖茹,聂子皓,尚语贤,俞灏明,黄轩,吕卓燃,李飞,周奇奇,张予曦,刘文曲,刘秋实,李蓓蕾,潘之琳,梁超,泓萱,苗驰,卢宇静,童菲,王美人,刘颖伦,熊乃瑾,张碧晨,刘黛希,马秋子,刘烨,宋家腾,买红妹,祖峰,瑛子,江珊,杨雨婷,刘成瑞,李金铭,柯蓝,艾晓琪,刘馨棋,孙铱,白宇,杜淳,马春瑞,雨婷儿,杨帆,隋雨蒙,李颖,何晟铭,梁亦芸,陆子艺,张雨绮,于月仙,孙坚,岳丽娜,袁文康,梁浩,盛冠森,李春嫒,阳蕾,刘恩佑,杨梓墨,李子峰,吴谨言,张睿,孙茜,李茂,荣梓杉,陆妍淇,荣蓉,任帅,黄一琳,原雨,殷桃,辛芷蕾,翟天临,李欣聪,张晓谦,李雨轩,王珂,刘智扬,张楷依,于函冰,柴碧云,刘倩文,于小彤,尹铸胜,于笑,吉克隽逸,贾乃亮,童苡萱,宋奕星,刘雨欣,刘琳,姜鸿,安泳畅,王乐君,杨曦,于越,张晓晨,袁雨萱,马丽,林思意,陆昱霖,宋伊人,甘婷婷,刘天佐,李程,林静,李明德,林永健,冉旭,苏倩薇,叶峰,任柯诺,刘
芸,叶青,马程程,卢杉,刘恬汝,丁婷婷,梁晶晶,李乃文,季晨,梁林琳,林江国,杀漠,李帅,张梦露,张峻宁,刘冠翔,刘丛丹,尹馨梓,路晨,米紫安,杨昆,李呈媛,徐申东,虞书欣,余心恬,梁爱琪,申奥,林千鹿,李昕岳,李萌萌,沈航,刘潺,穆乐恩,王彦霖,周雨彤,侍宣如,李嘉雯,王闯,李泽,余皑磊,苑子艺,赵予熙,李芯逸,刘钇彤,侯佩杉,侯梦瑶,姚奕辰,石安妮,施潮锦子,井星文,马晓伟,马灿灿,隋咏良,楼佳悦,陈思成,岳旸,马靓,陈都灵,李茜,卢星宇,李一情,何穗,李超,张炯敏,沙溢,王挺,SNH48,陆骏瑶,陈雅婷,李依伊 diff --git a/make_datasets/make_pix2pix_dataset.py b/make_datasets/make_pix2pix_dataset.py index 7dcae25..c87dc30 100644 --- a/make_datasets/make_pix2pix_dataset.py +++ b/make_datasets/make_pix2pix_dataset.py @@ -16,7 +16,7 @@ from models import runmodel,loadmodel import util.image_processing as impro -from util import util,mosaic,data +from util import degradater, util,mosaic,data opt.parser.add_argument('--datadir',type=str,default='../datasets/draw/face', help='') @@ -107,11 +107,11 @@ saveflag = True if opt.mod == ['drawn','irregular']: - x,y,size,area = impro.boundingSquare(mask_drawn, random.uniform(1.2,1.6)) + x,y,size,area = impro.boundingSquare(mask_drawn, random.uniform(1.1,1.6)) elif opt.mod == ['network','irregular']: - x,y,size,area = impro.boundingSquare(mask_net, random.uniform(1.2,1.6)) + x,y,size,area = impro.boundingSquare(mask_net, random.uniform(1.1,1.6)) else: - x,y,size,area = impro.boundingSquare(mask, random.uniform(1.2,1.6)) + x,y,size,area = impro.boundingSquare(mask, random.uniform(1.1,1.6)) if area < 1000: saveflag = False @@ -130,11 +130,15 @@ if saveflag: # add mosaic img_mosaic = mosaic.addmosaic_random(img, mask) - # random blur + # random degradater if random.random()>0.5: - Q = random.randint(1,15) - img = impro.dctblur(img,Q) - img_mosaic = impro.dctblur(img_mosaic,Q) + degradate_params = degradater.get_random_degenerate_params(mod='weaker_2') + img = degradater.degradate(img,degradate_params) + img_mosaic = degradater.degradate(img_mosaic,degradate_params) + # if random.random()>0.5: + # Q = random.randint(1,15) + # img = impro.dctblur(img,Q) + # img_mosaic = impro.dctblur(img_mosaic,Q) savecnt += 1 diff --git a/make_datasets/make_video_dataset.py b/make_datasets/make_video_dataset.py index 64c7fb7..84ef5fa 100644 --- a/make_datasets/make_video_dataset.py +++ b/make_datasets/make_video_dataset.py @@ -14,7 +14,7 @@ from models import runmodel,loadmodel import util.image_processing as impro -from util import util,mosaic,data,ffmpeg +from util import filt, util,mosaic,data,ffmpeg opt.parser.add_argument('--datadir',type=str,default='your video dir', help='') @@ -93,30 +93,65 @@ imagepaths = util.Traversal(opt.temp_dir+'/video2image') imagepaths = sorted(imagepaths) imgs=[];masks=[] - mask_flag = False - + # mask_flag = False + # for imagepath in imagepaths: + # img = impro.imread(imagepath) + # mask = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0] + # imgs.append(img) + # masks.append(mask) + # if not mask_flag: + # mask_avg = mask.astype(np.float64) + # mask_flag = True + # else: + # mask_avg += mask.astype(np.float64) + + # mask_avg = np.clip(mask_avg/len(imagepaths),0,255).astype('uint8') + # mask_avg = impro.mask_threshold(mask_avg,20,64) + # if not opt.all_mosaic_area: + # mask_avg = impro.find_mostlikely_ROI(mask_avg) + # x,y,size,area = impro.boundingSquare(mask_avg,Ex_mul=random.uniform(1.1,1.5)) + + # for i in range(len(imagepaths)): + # img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC) + # mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC) + # impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img) + # impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask) + ex_mul = 
random.uniform(1.2,1.7)
+    positions = []
     for imagepath in imagepaths:
         img = impro.imread(imagepath)
         mask = runmodel.get_ROI_position(img,net,opt,keepsize=True)[0]
         imgs.append(img)
         masks.append(mask)
-        if not mask_flag:
-            mask_avg = mask.astype(np.float64)
-            mask_flag = True
-        else:
-            mask_avg += mask.astype(np.float64)
-
-    mask_avg = np.clip(mask_avg/len(imagepaths),0,255).astype('uint8')
-    mask_avg = impro.mask_threshold(mask_avg,20,64)
-    if not opt.all_mosaic_area:
-        mask_avg = impro.find_mostlikely_ROI(mask_avg)
-    x,y,size,area = impro.boundingSquare(mask_avg,Ex_mul=random.uniform(1.1,1.5))
-
-    for i in range(len(imagepaths)):
-        img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
-        mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
-        impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img)
-        impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask)
+        x,y,size,area = impro.boundingSquare(mask,Ex_mul=ex_mul)
+        positions.append([x,y,size])
+    positions = np.array(positions)
+    for i in range(3):positions[:,i] = filt.medfilt(positions[:,i],opt.medfilt_num)
+
+    for i,imagepath in enumerate(imagepaths):
+        x,y,size = positions[i][0],positions[i][1],positions[i][2]
+        tmp_cnt = i
+        while size<opt.minsize and tmp_cnt>0:
+            tmp_cnt = tmp_cnt-1
+            x,y,size = positions[tmp_cnt][0],positions[tmp_cnt][1],positions[tmp_cnt][2]
+        img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
+        mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
+        impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img)
+        impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask)
+        if size>opt.minsize//4:
+            # if not opt.all_mosaic_area:
+            #     mask_avg = impro.find_mostlikely_ROI(mask_avg)
+            #     x,y,size,area = impro.boundingSquare(mask_avg,Ex_mul=ex_mul)
+            #     img = impro.resize(imgs[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
+            #     mask = impro.resize(masks[i][y-size:y+size,x-size:x+size],opt.outsize,interpolation=cv2.INTER_CUBIC)
+            #     impro.imwrite(os.path.join(origindir,'%05d'%(i+1)+'.jpg'), img)
+            #     impro.imwrite(os.path.join(maskdir,'%05d'%(i+1)+'.png'), mask)
+            result_cnt+=1
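
Note on the make_video_dataset.py change above: instead of one averaged crop box per clip, every frame now gets its own bounding square, and each coordinate track is median-filtered over time so a single bad detection cannot make the crop window jump. A minimal self-contained sketch of that smoothing step, using scipy.signal.medfilt as a stand-in for the repo's util.filt helper (SciPy availability and the window size are assumptions here, not the repo's opt.medfilt_num default):

    import numpy as np
    from scipy.signal import medfilt  # stand-in for util.filt.medfilt

    def smooth_positions(positions, window=11):
        """positions: list of per-frame [x, y, size] boxes -> smoothed int array."""
        positions = np.array(positions, dtype=np.float64)
        for i in range(3):
            # median-filter each coordinate over time; the window must be odd
            positions[:, i] = medfilt(positions[:, i], window)
        return positions.astype(np.int64)
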
diff --git a/models/loadmodel.py b/models/loadmodel.py
index 16124c4..7d9b391 100755
--- a/models/loadmodel.py
+++ b/models/loadmodel.py
@@ -51,7 +51,7 @@ def style(opt):
     return netG
 
 def video(opt):
-    netG = video_G(N=2,n_blocks=1,gpu_id=opt.gpu_id)
+    netG = video_G(N=2,n_blocks=4,gpu_id=opt.gpu_id)
     show_paramsnumber(netG,'netG')
     netG.load_state_dict(torch.load(opt.model_path))
     netG = model_util.todevice(netG,opt.gpu_id)
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..23c0f2c
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+opencv_python==4.5.1.48
+numpy==1.19.2
+torchvision==0.8.2
+torch==1.7.1
+matplotlib==3.3.2
+tensorboardX==2.2
+scikit-image==0.17.2
\ No newline at end of file
diff --git a/server.py b/server.py
new file mode 100644
index 0000000..f76ca1c
--- /dev/null
+++ b/server.py
@@ -0,0 +1,60 @@
+import os
+import sys
+import traceback
+import cv2
+import numpy as np
+try:
+    from cores import Options,core
+    from util import util
+    from util import image_processing as impro
+    from models import loadmodel
+except Exception as e:
+    print(e)
+    input('Please press any key to exit.\n')
+    sys.exit(0)
+
+# python server.py --gpu_id 0 --model_path ./pretrained_models/mosaic/clean_face_HD.pth
+opt = Options()
+opt.parser.add_argument('--port',type=int,default=4000, help='')
+opt = opt.getparse(True)
+netM = loadmodel.bisenet(opt,'mosaic')
+netG = loadmodel.pix2pix(opt)
+
+from flask import Flask, request
+import base64
+import shutil
+
+app = Flask(__name__)
+
+@app.route("/handle", methods=["POST"])
+def handle():
+    result = {}
+    # to opencv img
+    try:
+        imgRec = request.form['img']
+        imgByte = base64.b64decode(imgRec)
+        img_np_arr = np.frombuffer(imgByte, np.uint8)
+        img = cv2.imdecode(img_np_arr, cv2.IMREAD_COLOR)
+    except Exception as e:
+        result['img'] = imgRec
+        result['info'] = 'readfailed'
+        return result
+
+    # run model
+    try:
+        if max(img.shape)>1080:
+            img = impro.resize(img,720,interpolation=cv2.INTER_CUBIC)
+        img = core.cleanmosaic_img_server(opt,img,netG,netM)
+    except Exception as e:
+        result['img'] = imgRec
+        result['info'] = 'procfailed'
+        return result
+
+    # return
+    imgbytes = cv2.imencode('.jpg', img)[1]
+    imgString = base64.b64encode(imgbytes).decode('utf-8')
+    result['img'] = imgString
+    result['info'] = 'ok'
+    return result
+
+app.run("0.0.0.0", port= opt.port, debug=opt.debug)
\ No newline at end of file
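
The server.py added above exposes a single POST endpoint. A minimal client sketch for it; the form field name ('img'), the base64 round-trip, and the 'info' status strings come from the handler above, while the host, port, and file names are illustrative assumptions (requests is also assumed to be installed; it is not in requirements.txt):

    import base64
    import requests

    with open('test.jpg', 'rb') as f:
        img_b64 = base64.b64encode(f.read()).decode('utf-8')

    # POST the base64-encoded image as the form field 'img'
    resp = requests.post('http://127.0.0.1:4000/handle', data={'img': img_b64}).json()

    if resp['info'] == 'ok':
        # the cleaned image comes back base64-encoded under 'img'
        with open('result.jpg', 'wb') as f:
            f.write(base64.b64decode(resp['img']))
    else:
        print('server error:', resp['info'])
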
diff --git a/train/add/train.py b/train/add/train.py
index a2f04ef..a939ee3 100644
--- a/train/add/train.py
+++ b/train/add/train.py
@@ -71,8 +71,8 @@ def loadimage(imagepaths,maskpaths,opt,test_flag = False):
         img,mask = data.random_transform_pair_image(img, mask, opt.finesize, test_flag)
         images[i] = (img.transpose((2, 0, 1))/255.0)
         masks[i] = (mask.reshape(1,1,opt.finesize,opt.finesize)/255.0)
-    images = Totensor(images,opt.gpu_id)
-    masks = Totensor(masks,opt.gpu_id)
+    images = data.to_tensor(images,opt.gpu_id)
+    masks = data.to_tensor(masks,opt.gpu_id)
 
     return images,masks
 
diff --git a/train/clean/train.py b/train/clean/train.py
index cf8371c..6ea23b7 100644
--- a/train/clean/train.py
+++ b/train/clean/train.py
@@ -33,13 +33,13 @@
 opt.parser.add_argument('--batchsize',type=int,default=1, help='')
 opt.parser.add_argument('--no_gan', action='store_true', help='if specified, do not use gan')
 opt.parser.add_argument('--n_blocks',type=int,default=4, help='')
-opt.parser.add_argument('--n_layers_D',type=int,default=1, help='')
+opt.parser.add_argument('--n_layers_D',type=int,default=2, help='')
 opt.parser.add_argument('--num_D',type=int,default=3, help='')
 opt.parser.add_argument('--lambda_L2',type=float,default=100, help='')
 opt.parser.add_argument('--lambda_VGG',type=float,default=1, help='')
-opt.parser.add_argument('--lambda_GAN',type=float,default=1, help='')
+opt.parser.add_argument('--lambda_GAN',type=float,default=0.01, help='')
 opt.parser.add_argument('--lambda_D',type=float,default=1, help='')
-opt.parser.add_argument('--load_thread',type=int,default=4, help='number of threads for loading data')
+opt.parser.add_argument('--load_thread',type=int,default=16, help='number of threads for loading data')
 
 opt.parser.add_argument('--dataset',type=str,default='./datasets/face/', help='')
 opt.parser.add_argument('--dataset_test',type=str,default='./datasets/face_test/', help='')
@@ -83,6 +83,7 @@ def ShowImage(tensor1,tensor2,tensor3,showiter,max_num,writer,tag):
 localtime = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
 tensorboard_savedir = os.path.join('checkpoints/tensorboard',localtime+'_'+opt.savename)
 TBGlobalWriter = SummaryWriter(tensorboard_savedir)
+print('Please run "tensorboard --logdir checkpoints/tensorboard --host=your_server_ip" and filter the runs by "'+localtime+'"')
 
 '''
 --------------------------Init Network--------------------------
 '''
diff --git a/util/data.py b/util/data.py
index 60628a5..8a7865e 100755
--- a/util/data.py
+++ b/util/data.py
@@ -1,5 +1,6 @@
 import random
 import os
+from util.mosaic import get_random_parameter
 import numpy as np
 import torch
 import torchvision.transforms as transforms
@@ -134,7 +135,7 @@ def random_transform_single_image(img,finesize,params=None,test_flag = False):
                 params['rate']['color'][2],params['rate']['color'][3],params['rate']['color'][4])
 
     if params['flag']['flip']:
-        img = img[:,::-1,:]
+        img = img[:,::-1]
 
     #check shape
     if img.shape[0]!= finesize or img.shape[1]!= finesize:
@@ -143,58 +144,11 @@ def random_transform_single_image(img,finesize,params=None,test_flag = False):
     return img
 
 def random_transform_pair_image(img,mask,finesize,test_flag = False):
-    #random scale
-    if random.random()<0.5:
-        h,w = img.shape[:2]
-        loadsize = min((h,w))
-        a = (float(h)/float(w))*random.uniform(0.9, 1.1)
-        if h
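
A closing note on the util/data.py flip change above: img[:,::-1,:] assumes a channel axis and breaks on 2-D grayscale masks, while img[:,::-1] flips the width axis for both layouts. A small illustration (the array shapes are arbitrary):

    import numpy as np

    color = np.zeros((64, 64, 3), dtype=np.uint8)  # H x W x C image
    gray = np.zeros((64, 64), dtype=np.uint8)      # H x W mask

    # slicing only the first two axes flips both layouts horizontally
    assert color[:, ::-1].shape == color.shape
    assert gray[:, ::-1].shape == gray.shape

    try:
        gray[:, ::-1, :]  # the old form: too many indices for a 2-D array
    except IndexError:
        print('img[:, ::-1, :] fails on single-channel input')
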