from __future__ import division, print_function
import os
import time
from glob import glob
import tensorflow as tf
import numpy as np
from scipy.io import savemat
from ops import *
class FaceAging(object):
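# Conditional Adversarial Autoencoder (CAAE) for face aging: the encoder maps an
# input face to a latent code z, the generator reconstructs a face from z plus the
# age and gender labels, and two discriminators adversarially regularize the latent
# code and the generated image.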
def __init__(self,
session, # TensorFlow session
size_image=128, # size of the input images
size_kernel=5, # size of the kernels in convolution and deconvolution
size_batch=100, # mini-batch size for training and testing; must be the square of an integer
num_input_channels=3, # number of channels of input images
num_encoder_channels=64, # number of channels of the first conv layer of encoder
num_z_channels=50, # number of channels of the layer z (noise or code)
num_categories=10, # number of categories (age segments) in the training dataset
num_gen_channels=1024, # number of channels of the first deconv layer of generator
enable_tile_label=True, # whether to tile the label
tile_ratio=1.0, # ratio of the tiled label length to the length of z
is_training=True, # flag for training or testing mode
save_dir='./save', # path to save checkpoints, samples, and summary
dataset_name='UTKFace' # name of the dataset in the folder ./data
):
self.session = session
self.image_value_range = (-1, 1)
self.size_image = size_image
self.size_kernel = size_kernel
self.size_batch = size_batch
self.num_input_channels = num_input_channels
self.num_encoder_channels = num_encoder_channels
self.num_z_channels = num_z_channels
self.num_categories = num_categories
self.num_gen_channels = num_gen_channels
self.enable_tile_label = enable_tile_label
self.tile_ratio = tile_ratio
self.is_training = is_training
self.save_dir = save_dir
self.dataset_name = dataset_name
# ************************************* input to graph ********************************************************
self.input_image = tf.placeholder(
tf.float32,
[self.size_batch, self.size_image, self.size_image, self.num_input_channels],
name='input_images'
)
self.age = tf.placeholder(
tf.float32,
[self.size_batch, self.num_categories],
name='age_labels'
)
self.gender = tf.placeholder(
tf.float32,
[self.size_batch, 2],
name='gender_labels'
)
self.z_prior = tf.placeholder(
tf.float32,
[self.size_batch, self.num_z_channels],
name='z_prior'
)
# ************************************* build the graph *******************************************************
print('\n\tBuilding graph ...')
# encoder: input image --> z
self.z = self.encoder(
image=self.input_image
)
# generator: z + label --> generated image
self.G = self.generator(
z=self.z,
y=self.age,
gender=self.gender,
enable_tile_label=self.enable_tile_label,
tile_ratio=self.tile_ratio
)
# discriminator on z
self.D_z, self.D_z_logits = self.discriminator_z(
z=self.z,
is_training=self.is_training
)
# discriminator on G
self.D_G, self.D_G_logits = self.discriminator_img(
image=self.G,
y=self.age,
gender=self.gender,
is_training=self.is_training
)
# discriminator on z_prior
self.D_z_prior, self.D_z_prior_logits = self.discriminator_z(
z=self.z_prior,
is_training=self.is_training,
reuse_variables=True
)
# discriminator on input image
self.D_input, self.D_input_logits = self.discriminator_img(
image=self.input_image,
y=self.age,
gender=self.gender,
is_training=self.is_training,
reuse_variables=True
)
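# Note: reuse_variables=True shares weights between the two applications of each
# discriminator, so D_z(z) / D_z(z_prior) and D_img(G) / D_img(input_image) are
# each the same network evaluated on two different inputs.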
# ************************************* loss functions *******************************************************
# loss function of encoder + generator
#self.EG_loss = tf.nn.l2_loss(self.input_image - self.G) / self.size_batch # L2 loss
self.EG_loss = tf.reduce_mean(tf.abs(self.input_image - self.G)) # L1 loss
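# The L1 reconstruction term is used instead of the commented-out L2 term above;
# L1 is less sensitive to outliers and tends to give less blurry reconstructions.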
# loss function of discriminator on z
self.D_z_loss_prior = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_prior_logits, labels=tf.ones_like(self.D_z_prior_logits))
)
self.D_z_loss_z = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_logits, labels=tf.zeros_like(self.D_z_logits))
)
self.E_z_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_z_logits, labels=tf.ones_like(self.D_z_logits))
)
# loss function of discriminator on image
self.D_img_loss_input = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_input_logits, labels=tf.ones_like(self.D_input_logits))
)
self.D_img_loss_G = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits, labels=tf.zeros_like(self.D_G_logits))
)
self.G_img_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=self.D_G_logits, labels=tf.ones_like(self.D_G_logits))
)
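# Standard sigmoid cross-entropy GAN losses: each discriminator is pushed toward 1
# on real samples (z_prior, input image) and toward 0 on fakes (encoded z, generated
# image), while the encoder and generator are trained to make the discriminators
# output 1 on their outputs.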
# total variation to smooth the generated image
tv_y_size = self.size_image
tv_x_size = self.size_image
self.tv_loss = (
(tf.nn.l2_loss(self.G[:, 1:, :, :] - self.G[:, :self.size_image - 1, :, :]) / tv_y_size) +
(tf.nn.l2_loss(self.G[:, :, 1:, :] - self.G[:, :, :self.size_image - 1, :]) / tv_x_size)) / self.size_batch
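# tv_loss is a total-variation penalty: l2_loss of the differences between vertically
# and horizontally adjacent pixels of G, normalized by the image size and batch size;
# it discourages high-frequency artifacts in the generated images.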
# *********************************** trainable variables ****************************************************
trainable_variables = tf.trainable_variables()
# variables of encoder
self.E_variables = [var for var in trainable_variables if 'E_' in var.name]
# variables of generator
self.G_variables = [var for var in trainable_variables if 'G_' in var.name]
# variables of discriminator on z
self.D_z_variables = [var for var in trainable_variables if 'D_z_' in var.name]
# variables of discriminator on image
self.D_img_variables = [var for var in trainable_variables if 'D_img_' in var.name]
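# The split into encoder/generator/discriminator variable lists relies on the naming
# convention assumed to be enforced by the layer helpers in ops.py: every variable of
# a sub-network carries the corresponding 'E_', 'G_', 'D_z_' or 'D_img_' prefix.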
# ************************************* collect the summary ***************************************
self.z_summary = tf.summary.histogram('z', self.z)
self.z_prior_summary = tf.summary.histogram('z_prior', self.z_prior)
self.EG_loss_summary = tf.summary.scalar('EG_loss', self.EG_loss)
self.D_z_loss_z_summary = tf.summary.scalar('D_z_loss_z', self.D_z_loss_z)
self.D_z_loss_prior_summary = tf.summary.scalar('D_z_loss_prior', self.D_z_loss_prior)
self.E_z_loss_summary = tf.summary.scalar('E_z_loss', self.E_z_loss)
self.D_z_logits_summary = tf.summary.histogram('D_z_logits', self.D_z_logits)
self.D_z_prior_logits_summary = tf.summary.histogram('D_z_prior_logits', self.D_z_prior_logits)
self.D_img_loss_input_summary = tf.summary.scalar('D_img_loss_input', self.D_img_loss_input)
self.D_img_loss_G_summary = tf.summary.scalar('D_img_loss_G', self.D_img_loss_G)
self.G_img_loss_summary = tf.summary.scalar('G_img_loss', self.G_img_loss)
self.D_G_logits_summary = tf.summary.histogram('D_G_logits', self.D_G_logits)