# 用机器学习做个艺术画家-Prisma(上)

1. 吸收用户拍摄的照片
2. 让计算机学会星空图的风格
3. 计算机输出自己做的“新画”

Prisma 比 Deep Art 先进的地方在于，它大大缩短了图像处理的时间：每张照片在 Prisma 系统内的处理时间控制在秒级别。Prisma 诞生于俄罗斯，是一个仅由 4 个年轻人历时一个半月开发出的图片处理应用。“将照片赋予毕加索式的艺术风格”是它的广告语。它的核心技术思想是：卷积神经网络可以被看作一个机器艺术家。

from __future__ import division
import matplotlib.pylab as plt
%matplotlib inline

import os
from PrismaCaffe import CaffePrismaClass
from PrismaTensor import TensorPrismaClass
import PrismaTensor
import PrismaHelper
import glob
import numpy as np
import PIL.Image
import ZCommonUtil
import itertools

### 1 基于caffe框架实现prisma

1.1 首先我们直观感受一下什么叫做机器艺术家

# Show a 2x4 grid of the sample photos.
sample_list = glob.glob("../sample/*.jpg")
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(15, 6))
axs_list = list(itertools.chain.from_iterable(axs))
for ind, ax in zip(range(2 * 4), axs_list):
    iter_fn = sample_list[ind]
    # Bug fix: iter_img was never defined in this cell; load it from the path.
    iter_img = PIL.Image.open(iter_fn)
    ax.set_title(os.path.basename(iter_fn))
    ax.imshow(iter_img)
    ax.set_axis_off()

cp = CaffePrismaClass(dog_mode=False)
# Bug fix: the `<>` operator was removed in Python 3 -- use `!=`. filter() and
# dict.keys() are wrapped in list() so the slices work on both Python 2 and 3.
nbks = list(filter(lambda nbk: nbk[-8:-1] != '_split_', list(cp.net.blobs.keys())[1:-2]))[:10]
abu4_file = '../sample/abu4.jpg'
PrismaHelper.show_array_ipython(np.float32(cp.resize_img(PIL.Image.open(abu4_file))))
nbks

['conv1/7x7_s2',
'pool1/3x3_s2',
'pool1/norm1',
'conv2/3x3_reduce',
'conv2/3x3',
'conv2/norm2',
'pool2/3x3_s2',
'inception_3a/1x1',
'inception_3a/3x3_reduce',
'inception_3a/3x3']

# Render the photo once per selected layer (first and last two skipped).
for blob_name in nbks[2:-2]:
    rendered = cp.fit_img(abu4_file, resize=True, nbk=blob_name, iter_n=10)
    PrismaHelper.show_array_ipython(np.float32(rendered))

# Show the three "up" result images in a row.
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(20, 10))
up_list = ['../show/up2.jpg', '../show/up3.jpg', '../show/up4.jpg']
for ind, ax in zip(range(1 * 3), axs):
    iter_fn = up_list[ind]
    # Bug fix: iter_img was never defined in this cell; load it from the path.
    iter_img = PIL.Image.open(iter_fn)
    # ax.set_title(os.path.basename(iter_fn))
    ax.imshow(iter_img)
    ax.set_axis_off()

# NOTE(review): abridged article excerpt of CaffePrismaClass.__init__ --
# `caffe` is imported elsewhere, `self.param_fn` is assigned only in the
# dog_mode branch, and `mean_blob` is constructed empty but never parsed from
# model_mean_file here. Verify against the full source on github.
class CaffePrismaClass(BasePrismaClass):
    def __init__(self, dog_mode=False):
        # Network definition (prototxt) the classifier is built from.
        self.net_fn = '../mode/deploy.prototxt'

        if not dog_mode:
            # Fixed BGR channel means (ImageNet-style values).
            mu = np.float32([104.0, 117.0, 123.0])
        else:
            # Custom dog-judge model: mean comes from a binaryproto blob.
            self.param_fn = '../mode/dog_judge_train_iter_5000.caffemodel'
            model_mean_file = '../mode/mean.binaryproto'
            mean_blob = caffe.proto.caffe_pb2.BlobProto()
            mean_npy = caffe.io.blobproto_to_array(mean_blob)
            mu = np.float32(mean_npy.mean(2).mean(2)[0])

        model = caffe.io.caffe_pb2.NetParameter()
        # Input gradients are needed for the dreaming/style ascent steps.
        model.force_backward = True

        # Write the patched definition and build the classifier from it.
        open('tmp.prototxt', 'w').write(str(model))
        self.net = caffe.Classifier('tmp.prototxt', self.param_fn,
                                    mean=mu,
                                    channel_swap=(
                                        2, 1, 0))

transform_param {
mirror: true
crop_size: 224
mean_value: 104
mean_value: 117
mean_value: 123
}

# Load another sample photo and display it resized.
abu1_file = '../sample/abu1.jpg'
PrismaHelper.show_array_ipython(np.float32(cp.resize_img(PIL.Image.open(abu1_file))))

1.2 直接使用某个神经元层的效果

# Stylize directly with the lowest conv layer (coarse texture/edge features).
d_img = cp.fit_img(abu1_file, resize=True, nbk='conv1/7x7_s2', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))

1.3 配合PIL预处理方式处理图像

class BasePrismaClass(six.with_metaclass(ABCMeta, object)):
    """Abstract base shared by the caffe and tensorflow prisma backends."""

    @abstractmethod
    def fit_guide_img(self, img_path, gd_path, resize=False, size=480, enhance=None, iter_n=10, **kwargs):
        """Render img_path in the style of the guide image at gd_path."""
        pass

    @abstractmethod
    def fit_img(self, img_path, resize=False, size=480, enhance=None, iter_n=10, **kwargs):
        """Render img_path without a style guide."""
        pass

    @abstractmethod
    def gd_features_make(self, *args, **kwargs):
        """Build the feature representation used to guide the render."""
        pass

    @abstractmethod
    def do_prisma(self, *args, **kwargs):
        """Run the actual style-transfer iteration."""
        pass

    def resize_img(self, r_img, base_width=480, keep_size=True):
        """Resize to base_width wide; keep the aspect ratio when keep_size is True,
        otherwise produce a base_width x base_width square."""
        if keep_size:
            scale = base_width / float(r_img.size[0])
            new_height = int(float(r_img.size[1]) * float(scale))
        else:
            new_height = base_width
        return r_img.resize((base_width, new_height), PIL.Image.ANTIALIAS)

    def handle_enhance(self, r_img, enhance, sharpness=8.8, brightness=1.8, contrast=2.6, color=7.6, contour=2.6):
        """Apply one named PIL enhancement/filter to r_img.

        Unknown (or None) `enhance` values return the image unchanged.
        """
        if enhance == 'Sharpness':
            return ImageEnhance.Sharpness(r_img).enhance(sharpness)
        if enhance == 'Brightness':
            return ImageEnhance.Brightness(r_img).enhance(brightness)
        if enhance == 'Contrast':
            return ImageEnhance.Contrast(r_img).enhance(contrast)
        if enhance == 'Color':
            return ImageEnhance.Color(r_img).enhance(color)
        if enhance == 'CONTOUR':
            # Boost contrast first, then trace contours.
            boosted = ImageEnhance.Contrast(r_img).enhance(contour)
            return boosted.filter(ImageFilter.CONTOUR)
        if enhance == 'EDGES':
            return r_img.filter(ImageFilter.FIND_EDGES)
        if enhance == 'EMBOSS':
            return r_img.filter(ImageFilter.EMBOSS)
        if enhance == 'EEM':
            return r_img.filter(ImageFilter.EDGE_ENHANCE_MORE)
        if enhance == 'EE':
            return r_img.filter(ImageFilter.EDGE_ENHANCE)
        return r_img

# conv2/3x3 stylization: plain, then with 'Sharpness' and 'Contrast' pre-enhance.
d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))
d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='Sharpness', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))
d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='Contrast', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))
# Remaining enhancers, kept here for experimentation:
# d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='Brightness', iter_n=10)
# PrismaHelper.show_array_ipython(np.float32(d_img))
# d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='CONTOUR', iter_n=10)
# PrismaHelper.show_array_ipython(np.float32(d_img))
# d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='Color', iter_n=10)
# PrismaHelper.show_array_ipython(np.float32(d_img))
# d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='EEM', iter_n=10)
# PrismaHelper.show_array_ipython(np.float32(d_img))
# d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3', enhance='EE', iter_n=10)
# PrismaHelper.show_array_ipython(np.float32(d_img))

# (Repeated excerpt from handle_enhance above, quoted by the article:
#  'CONTOUR' boosts contrast first, then applies the PIL CONTOUR filter.)
elif enhance == 'CONTOUR':
    enhancer = ImageEnhance.Contrast(r_img)
    t_img = enhancer.enhance(contour)
    fc_img = t_img.filter(ImageFilter.CONTOUR)
    img = fc_img

def fit_batch_img(self, img_path, resize=False, size=480, enhance=None):
    """
    Batch-render img_path through every usable blob layer; serial, not parallel.
    :param img_path: source image path
    :param resize: resize the image to `size` width before rendering
    :param size: target width used when resize is True
    :param enhance: optional PIL enhancement name, see handle_enhance
    :return: directory path the rendered images were saved into
    """
    r_img = PIL.Image.open(img_path)
    if resize:
        r_img = self.resize_img(r_img, size)

    org_img = self.handle_enhance(r_img, enhance)

    e_str = '' if enhance is None else '_' + enhance.lower()
    save_path = os.path.dirname(img_path) + '/batch_caffe/' + e_str
    ZCommonUtil.ensure_dir(save_path)
    org_img_path = save_path + 'org.jpeg'
    # Bug fix: JPEG is binary data -- open with 'wb'. Text mode fails on
    # Python 3 and can corrupt the file on Windows.
    with open(org_img_path, 'wb') as f:
        org_img.save(f, 'jpeg')

    org_img = np.float32(org_img)
    start = 1
    # list() so index()/slicing also work on Python 3 dict views.
    blob_keys = list(self.net.blobs.keys())
    end = blob_keys.index('inception_4c/pool')
    nbks = blob_keys[start:end]

    # This loop cannot be parallelized with multiprocessing because
    # caffe.classifier.Classifier instances cannot be serialized:
    # Pickling of "caffe.classifier.Classifier" instances is not enabled
    for nbk in nbks:
        if nbk[-8:-1] == '_split_':
            # Skip caffe's auto-generated split blobs.
            continue
        fn = save_path + nbk.replace('/', '_') + '.jpg'
        deep_img = self.do_prisma(org_img, iter_n=10, end=nbk)
        PrismaHelper.save_array_img(deep_img, fn)
    return save_path

def _objective_l2(self, dst):
    # L2 objective for the ascent step: the gradient of ||data||^2 / 2 w.r.t.
    # data is the data itself, so copy it straight into the diff blob.
    dst.diff[:] = dst.data

def _objective_guide_features(self, dst, guide_features):
x = dst.data[0].copy()
y = guide_features
ch = x.shape[0]
x = x.reshape(ch, -1)
y = y.reshape(ch, -1)
a = x.T.dot(y)
dst.diff[0].reshape(ch, -1)[:] = y[:, a.argmax(1)]

def do_prisma_step(self, step_size=1.5, end='inception_4c/output',
                   jitter=32, objective=None):
    """One gradient-ascent step on the input image toward maximizing `objective`
    at layer `end`; random jitter is applied before and undone after the step.
    Raises ValueError when no objective is supplied."""
    if objective is None:
        raise ValueError('make_step objective is None!!!')

    src = self.net.blobs['data']
    dst = self.net.blobs[end]
    # Random spatial shift to reduce grid artifacts across iterations.
    ox, oy = np.random.randint(-jitter, jitter + 1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2)

    self.net.forward(end=end)
    objective(dst)  # writes the target gradient into dst.diff
    self.net.backward(start=end)

    g = src.diff[0]
    # Normalized ascent: scale by mean |gradient| so the step size is stable.
    src.data[:] += step_size / np.abs(g).mean() * g
    # Undo the jitter shift.
    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2)

def do_prisma(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
              end='inception_4c/output', **step_params):
    """Multi-scale (octave) ascent loop: optimize coarse-to-fine, carrying the
    accumulated detail between scales. Returns the rendered image array."""
    octaves = [PrismaHelper.preprocess_with_roll(base_img, self.mean_pixel)]
    # Build progressively downscaled copies (Python 2 xrange).
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = self.net.blobs['data']
    detail = np.zeros_like(octaves[-1])
    # Iterate from the smallest octave up to full resolution.
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # Upscale detail accumulated at the previous, smaller octave.
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            self.do_prisma_step(end=end, **step_params)
        detail = src.data[0] - octave_base
    return PrismaHelper.deprocess_with_stack(src.data[0], self.mean_pixel)

def gd_features_make(self, guide, end):
    """Forward the guide image to layer `end` and return a copy of its activations."""
    height, width = guide.shape[:2]
    src = self.net.blobs['data']
    dst = self.net.blobs[end]
    src.reshape(1, 3, height, width)
    src.data[0] = PrismaHelper.preprocess_with_roll(guide, self.mean_pixel)
    self.net.forward(end=end)
    return dst.data[0].copy()

def fit_batch_img(self, img_path, resize=False, size=480, enhance=None):
    """
    Batch-render img_path through every usable blob layer; serial only (see
    note below). Docstring typo "primsma" fixed.
    :param img_path: source image path
    :param resize: resize the image to `size` width before rendering
    :param size: target width used when resize is True
    :param enhance: optional PIL enhancement name, see handle_enhance
    :return: directory path the rendered images were saved into
    """
    r_img = PIL.Image.open(img_path)
    if resize:
        r_img = self.resize_img(r_img, size)

    org_img = self.handle_enhance(r_img, enhance)

    e_str = '' if enhance is None else '_' + enhance.lower()
    save_path = os.path.dirname(img_path) + '/batch_caffe/' + e_str
    ZCommonUtil.ensure_dir(save_path)
    org_img_path = save_path + 'org.jpeg'
    # Bug fix: JPEG is binary data -- open with 'wb'. Text mode fails on
    # Python 3 and can corrupt the file on Windows.
    with open(org_img_path, 'wb') as f:
        org_img.save(f, 'jpeg')

    org_img = np.float32(org_img)
    start = 1
    # list() so index()/slicing also work on Python 3 dict views.
    blob_keys = list(self.net.blobs.keys())
    end = blob_keys.index('inception_4c/pool')
    nbks = blob_keys[start:end]

    # This loop cannot be parallelized with multiprocessing because
    # caffe.classifier.Classifier instances cannot be serialized:
    # Pickling of "caffe.classifier.Classifier" instances is not enabled
    for nbk in nbks:
        if nbk[-8:-1] == '_split_':
            # Skip caffe's auto-generated split blobs.
            continue
        fn = save_path + nbk.replace('/', '_') + '.jpg'
        deep_img = self.do_prisma(org_img, iter_n=10, end=nbk)
        PrismaHelper.save_array_img(deep_img, fn)
    return save_path

def show_lydw(fd_fn):
    """Show the last two images matching glob pattern fd_fn side by side."""
    sample_list = glob.glob(fd_fn)
    sample_list = sample_list[::-1]
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(20, 10))
    for ind, ax in zip(range(1 * 2), axs):
        iter_fn = sample_list[ind]
        # Bug fix: iter_img was never defined; load it from the path.
        iter_img = PIL.Image.open(iter_fn)
        ax.set_title(os.path.basename(iter_fn))
        ax.imshow(iter_img)
        ax.set_axis_off()

# Render the 'bj*' and 'nr*' result pairs saved earlier.
show_lydw('../show/bj*.jpg')

show_lydw('../show/nr*.jpg')

1.4 使用引导图进行风格引导

img_gd_list = glob.glob("../prisma_gd/*.jpg")

# Resize every guide image to 224x224 and save a copy under ../prisma_gd_224/.
for img_gd in img_gd_list:
    width = 224
    hsize = 224
    org_img_gd = PIL.Image.open(img_gd)
    r_img_gd = org_img_gd.resize((width, hsize), PIL.Image.ANTIALIAS)
    filen_ame = '../prisma_gd_224/' + os.path.basename(img_gd)
    ZCommonUtil.ensure_dir(filen_ame)
    # Bug fix: JPEG is binary data -- open with 'wb' (text mode fails on
    # Python 3 and can corrupt the file on Windows).
    with open(filen_ame, 'wb') as f:
        r_img_gd.save(f, 'jpeg')

# Load the 224x224 guide set and preview one of the guide images.
img_gd_list = glob.glob("../prisma_gd_224/*.jpg")
gd_path = '../prisma_gd_224/tooopen_sy_127260228921.jpg'
guide = np.float32(PIL.Image.open(gd_path))
PrismaHelper.show_array_ipython(guide)

# Compare the unguided render with the guide-image render on the same layer.
d_img = cp.fit_img(abu1_file, resize=True, nbk='conv2/3x3_reduce', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))

d_img = cp.fit_guide_img(abu1_file, gd_path, resize=True, nbk='conv2/3x3_reduce', iter_n=10)
PrismaHelper.show_array_ipython(np.float32(d_img))

### 2 基于tensorflow框架实现prisma

img_gd_list = glob.glob("../prisma_gd/*.jpg")

# Show all guide images in a 5x8 grid.
fig, axs = plt.subplots(nrows=5, ncols=8, figsize=(30, 15))
axs_list = list(itertools.chain.from_iterable(axs))
for ind, ax in zip(range(5 * 8), axs_list):
    iter_fn = img_gd_list[ind]
    # Bug fix: iter_img was never defined; load it from the path.
    iter_img = PIL.Image.open(iter_fn)
    ax.set_title(os.path.basename(iter_fn).split('.')[0])
    ax.imshow(iter_img)
    ax.set_axis_off()

2.1 实例化一个封装好的tensorflow风格画实例

# Instantiate the tensorflow-based renderer; the bare name shows its repr
# in the notebook (the mean_pixel output below).
tp = TensorPrismaClass()
tp

mean_pixel: [ 123.68   116.779  103.939]

TensorPrismaClass 的实现原理是：低层次的卷积核学习纹理、颜色、边界等粗线条特征，高层次卷积核学到的是底层特征叠加所产生的形状、内容特征。最终风格画的效果由各个层的特征按权重组合而成，通过不断迭代计算 loss function 来寻找图像的特征分配权重。详细代码请查阅 github 上的 PrismaTensor.py，核心代码如下：

def _conv2d(self, img, w, b):
    # Stride-1 SAME convolution with fixed (constant) pretrained weights.
    return tf.nn.bias_add(tf.nn.conv2d(img, tf.constant(w), strides=[1, 1, 1, 1], padding='SAME'), b)

def _max_pool(self, img, k):
    # k x k max pooling with stride k and SAME padding.
    return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')

def __init__(self):
    """Load the pretrained VGG weights file and the preprocessing mean.
    NOTE(review): abridged excerpt -- `self.net_data` is read below but its
    assignment (loading the .mat at K_VGG_MAT_PATH) is not shown here;
    confirm against the full PrismaTensor.py source.
    """
    self.net_fn = K_VGG_MAT_PATH

    if not ZCommonUtil.file_exist(self.net_fn):
        raise RuntimeError('self.net_fn not exist!!!')

    self.net_layers = K_NET_LAYER
    self.mean = self.net_data['normalization'][0][0][0]
    # Per-channel mean pixel, averaged over the spatial dimensions.
    self.mean_pixel = np.mean(self.mean, axis=(0, 1))
    self.weights = self.net_data['layers'][0]

def _build_vgg_net(self, shape, image_tf=None):
    """Build the VGG feature graph over an input of `shape`.
    Returns ({layer name: tensor}, input placeholder/tensor)."""
    if image_tf is None:
        image_tf = tf.placeholder('float', shape=shape)
    net = dict()
    current = image_tf
    for ind, name in enumerate(self.net_layers):
        # Layer kind is encoded in the first 4 chars of its name (conv/relu/pool).
        kind = name[:4]
        if kind == 'conv':
            kernels, bias = self.weights[ind][0][0][0][0]
            # Swap the first two kernel axes -- presumably converting the
            # stored .mat layout to TF's (h, w, in, out); verify if changed.
            kernels = np.transpose(kernels, (1, 0, 2, 3))
            bias = bias.reshape(-1)
            current = self._conv2d(current, kernels, bias)
        elif kind == 'relu':
            current = tf.nn.relu(current)
        elif kind == 'pool':
            current = self._max_pool(current, 2)
        net[name] = current
    return net, image_tf

def _features_make(self, img, image_tf, net, features, guide):
    """Fill `features` in place: Gram matrices of the style layers when
    guide is True, otherwise the raw content-layer activations."""
    batched = np.array([img - self.mean_pixel])
    if guide:
        for layer_name in K_GUIDE_LAYERS:
            acts = net[layer_name].eval(feed_dict={image_tf: batched})
            acts = np.reshape(acts, (-1, acts.shape[3]))
            features[layer_name] = np.matmul(acts.T, acts) / acts.size
    else:
        features[K_ORG_LAYER] = net[K_ORG_LAYER].eval(feed_dict={image_tf: batched})

def _tensor_size(self, tensor):
    # Total element count of a static-shaped tensor (product of its dims).
    # NOTE(review): relies on `reduce` (Python 2 builtin) and `mul`
    # (operator.mul) imported elsewhere in the module.
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)

def gd_features_make(self, org_img, guide_img):
    """Compute content features of org_img and style (Gram) features of
    guide_img in a throwaway graph/session; returns (org, guide) dicts."""
    # noinspection PyUnusedLocal
    with tf.Graph().as_default(), tf.Session() as sess:
        org_shape = (1,) + org_img.shape
        org_net, org_img_tf = self._build_vgg_net(org_shape)
        org_features = dict()
        self._features_make(org_img, org_img_tf, org_net, org_features, False)

        guide_shapes = (1,) + guide_img.shape
        guide_net, guide_img_tf = self._build_vgg_net(guide_shapes)
        guide_features = dict()
        self._features_make(guide_img, guide_img_tf, guide_net, guide_features, True)
    return org_features, guide_features

def do_prisma(self, org_img, guide_img, ckp_fn, iter_n):
    """Optimize an output image that keeps org_img's content while adopting
    guide_img's style; periodically checkpoints the best image to ckp_fn.
    NOTE(review): abridged excerpt -- `train_step` (the optimizer op) is used
    below but its construction is not shown; `g_doing_parallel` is a
    module-level flag set by the parallel driver. Verify in PrismaTensor.py.
    """
    org_shape = (1,) + org_img.shape
    org_features, guide_features = self.gd_features_make(org_img, guide_img)

    with tf.Graph().as_default():
        # Start from small random noise rather than zeros.
        # out_v = tf.zeros(org_shape, dtype=tf.float32, name=None)
        out_v = tf.random_normal(org_shape) * 0.256
        out_img = tf.Variable(out_v)
        out_net, _ = self._build_vgg_net(org_shape, out_img)

        # Content loss: distance to the content-layer features of org_img.
        org_loss = K_ORG_WEIGHT * (2 * tf.nn.l2_loss(
            out_net[K_ORG_LAYER] - org_features[K_ORG_LAYER]) /
            org_features[K_ORG_LAYER].size)

        # Style loss: Gram-matrix distance summed over the guide layers.
        style_loss = 0
        for guide_layer in K_GUIDE_LAYERS:
            layer = out_net[guide_layer]
            _, height, width, number = map(lambda x: x.value, layer.get_shape())
            size = height * width * number
            feats = tf.reshape(layer, (-1, number))
            gram = tf.matmul(tf.transpose(feats), feats) / size
            style_gram = guide_features[guide_layer]
            style_loss += K_GUIDE_WEIGHT * 2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size

        # Total-variation loss: penalize neighboring-pixel differences.
        tv_y_size = self._tensor_size(out_img[:, 1:, :, :])
        tv_x_size = self._tensor_size(out_img[:, :, 1:, :])
        tv_loss = K_TV_WEIGHT * 2 * (
            (tf.nn.l2_loss(out_img[:, 1:, :, :] - out_img[:, :org_shape[1] - 1, :, :]) /
             tv_y_size) +
            (tf.nn.l2_loss(out_img[:, :, 1:, :] - out_img[:, :, :org_shape[2] - 1, :]) /
             tv_x_size))
        loss = org_loss + style_loss + tv_loss

        # noinspection PyUnresolvedReferences
        def print_progress(ind, last=False):
            # Log the loss breakdown every K_PRINT_ITER iterations and at the end.
            if last or (ind > 0 and ind % K_PRINT_ITER == 0):
                ZLog.info('Iteration %d/%d\n' % (ind + 1, iter_n))
                ZLog.debug('  content loss: %g\n' % org_loss.eval())
                ZLog.debug('    style loss: %g\n' % style_loss.eval())
                ZLog.debug('       tv loss: %g\n' % tv_loss.eval())
                ZLog.debug('    total loss: %g\n' % loss.eval())

        best_loss = float('inf')
        best = None
        with tf.Session() as sess:
            sess.run(tf.initialize_all_variables())
            for i in range(iter_n):
                last_step = (i == iter_n - 1)
                if not g_doing_parallel:
                    print_progress(i, last=last_step)
                train_step.run()
                if (i > 0 and i % K_CKP_ITER == 0) or last_step:
                    # noinspection PyUnresolvedReferences
                    this_loss = loss.eval()
                    if this_loss < best_loss:
                        # Keep the best (lowest-loss) image seen so far.
                        best_loss = this_loss
                        best = out_img.eval()
                        if not last_step:
                            # Save an intermediate checkpoint image.
                            ckp_fn_iter = K_CKP_FN_FMT % (ckp_fn, i)
                            PrismaHelper.save_array_img(best.reshape(org_shape[1:]) + self.mean_pixel, ckp_fn_iter)
        return best.reshape(org_shape[1:]) + self.mean_pixel

# Pick the k5 guide image and run a guided render for 1800 iterations.
gd_path = '../prisma_gd/k5.jpg'
guide = np.float32(tp.resize_img(PIL.Image.open(gd_path)))
PrismaHelper.show_array_ipython(guide)

PrismaHelper.show_array_ipython(tp.fit_guide_img(abu1_file, gd_path, resize=True, iter_n=1800))

# Collect the k5 checkpoint images and sort them by iteration number,
# parsed from the trailing "_<iter>" of each filename (before ".jpeg").
k5_list = glob.glob("../sample/batch_tensor/k5*.jpeg")
k5_ind = map(lambda fn: int(fn.rsplit('.')[2].rsplit('_')[-1]), k5_list)
k5_sorted = sorted(zip(k5_ind, k5_list))
k5_sorted

[(100, '../sample/batch_tensor/k51051051372_100.jpeg'),
(200, '../sample/batch_tensor/k51051051372_200.jpeg'),
(300, '../sample/batch_tensor/k51051051372_300.jpeg'),
(400, '../sample/batch_tensor/k51051051372_400.jpeg'),
(500, '../sample/batch_tensor/k51051051372_500.jpeg'),
(600, '../sample/batch_tensor/k51051051372_600.jpeg'),
(700, '../sample/batch_tensor/k51051051372_700.jpeg'),
(800, '../sample/batch_tensor/k51051051372_800.jpeg'),
(900, '../sample/batch_tensor/k51051051372_900.jpeg'),
(1000, '../sample/batch_tensor/k51051051372_1000.jpeg'),
(1100, '../sample/batch_tensor/k51051051372_1100.jpeg'),
(1200, '../sample/batch_tensor/k51051051372_1200.jpeg'),
(1300, '../sample/batch_tensor/k51051051372_1300.jpeg'),
(1400, '../sample/batch_tensor/k51051051372_1400.jpeg'),
(1500, '../sample/batch_tensor/k51051051372_1500.jpeg'),
(1600, '../sample/batch_tensor/k51051051372_1600.jpeg'),
(1700, '../sample/batch_tensor/k51051051372_1700.jpeg'),
(1800, '../sample/batch_tensor/k51051051372_1800.jpeg')]

# Show the checkpoint progression in a 3x6 grid.
fig, axs = plt.subplots(nrows=3, ncols=6, figsize=(18, 9))
axs_list = list(itertools.chain.from_iterable(axs))
for ind, ax in zip(range(3 * 6), axs_list):
    iter_cnt, iter_fn = k5_sorted[ind]
    # Bug fix: iter_img was never defined; load it from the path.
    iter_img = PIL.Image.open(iter_fn)
    ax.imshow(iter_img)
    ax.set_title("k5 iter: {}".format(iter_cnt))
    ax.set_axis_off()

TensorPrismaClass 可以胜任风格画渲染的使命，但问题是速度太慢。而且查看类函数 _features_make 可以发现它对特征的筛选方式是：引导图只使用浅层特征 K_GUIDE_LAYERS = ('relu1_1', 'relu2_1', 'relu3_1', 'relu4_1', 'relu5_1')，原始图像使用 K_ORG_LAYER = 'relu4_2'。这样很明显无法作出一幅主题非常突出鲜明的图像，所以这种方式比较适合特定类型的图像，普遍适用性存在很大的问题。

2.2 预处理图像和事后处理图像

# Content (kl) and guide (cx6) images for the pre/post-processing demo.
kl_file = '../sample/kl.jpg'
kl_img = np.float32(tp.resize_img(PIL.Image.open(kl_file)))
PrismaHelper.show_array_ipython(kl_img)
cx6_file = '../prisma_gd/cx6.jpg'
cx6_img = np.float32(tp.resize_img(PIL.Image.open(cx6_file)))
PrismaHelper.show_array_ipython(cx6_img)

tn_img = tp.fit_guide_img(kl_file, cx6_file, resize=True, iter_n=3500)
PrismaHelper.show_array_ipython(tn_img)

# Raise the overall RGB values (brighten the rendered image).
tnn_br = tn_img * 1.3
PrismaHelper.show_array_ipython(tnn_br)

ft = PIL.Image.fromarray(np.uint8(tn_img))
# Post-process with the PIL enhancement helpers from the base class.
ft = tp.handle_enhance(ft, 'Contrast')
ft

# The transformed image can also be run through CaffePrismaClass again.
img_np = np.float32(ft)
d_img = cp.fit_img('', resize=True, nbk='conv2/3x3', iter_n=10, img_np=img_np)
PrismaHelper.show_array_ipython(np.float32(d_img))

2.3 一个有意思的实验

# NOTE(review): `s_file` below is not defined anywhere in this excerpt --
# presumably a sample image path assigned in an omitted cell; verify before running.
guide = np.float32(tp.resize_img(PIL.Image.open('../prisma_gd/106480401.jpg')))
PrismaHelper.show_array_ipython(guide)

PrismaHelper.show_array_ipython(tp.fit_guide_img(s_file, gd_path, resize=True, size=640, iter_n=1500))

2.4 批量转换风格画接口的使用

def do_fit_parallel_img(path_product, resize, size, enhance, iter_n):
    """Worker for one (image, guide) pair; runs inside a joblib subprocess."""
    global g_doing_parallel

    # The module-level flag must be set inside every worker process,
    # since each subprocess gets its own copy of the module globals.
    g_doing_parallel = True

    img_path, gd_path = path_product[0], path_product[1]
    prisma_img = TensorPrismaClass().fit_guide_img(img_path, gd_path, resize=resize, size=size,
                                                   enhance=enhance, iter_n=iter_n)

    g_doing_parallel = False
    return path_product, prisma_img

def fit_parallel_img(img_path, gd_path, resize=False, size=480, enhance=None, iter_n=800, n_jobs=-1):
    """Fan every (image, guide) combination out across joblib worker processes.

    Both img_path and gd_path must be lists; raises TypeError otherwise.
    Returns the list of (path_product, prisma_img) results.
    """
    if not isinstance(img_path, list) or not isinstance(gd_path, list):
        raise TypeError('img_path or gd_path must list for mul process handle!')

    runner = Parallel(n_jobs=n_jobs, verbose=0, pre_dispatch='2*n_jobs')
    pairs = product(img_path, gd_path)
    return runner(delayed(do_fit_parallel_img)(pair, resize, size, enhance, iter_n) for pair in pairs)