import inspect
from typing import List

from tensorflow.keras import backend as K
from tensorflow.keras import Model, Input, optimizers
from tensorflow.keras import layers
from tensorflow.keras.layers import Activation, SpatialDropout1D, Lambda
from tensorflow.keras.layers import Layer, Conv1D, Dense, BatchNormalization, LayerNormalization


def is_power_of_two(num: int):
    # True iff num is a positive power of two: num & (num - 1) clears the lowest set bit.
    return num != 0 and ((num & (num - 1)) == 0)
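
# Illustrative sanity checks (not in the original source):
#   is_power_of_two(8) -> True; is_power_of_two(6) -> False; is_power_of_two(0) -> False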


def adjust_dilations(dilations: list):
    # Adaptive dilation rates: keep the schedule if every entry is already a
    # power of two; otherwise treat each entry i as an exponent and use 2 ** i.
    if all([is_power_of_two(i) for i in dilations]):
        return dilations
    else:
        new_dilations = [2 ** i for i in dilations]
        return new_dilations
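
# For example (illustrative only): adjust_dilations([1, 2, 4]) returns [1, 2, 4]
# unchanged, while adjust_dilations([1, 2, 3]) returns [2, 4, 8].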


# Residual block definition.
class ResidualBlock(Layer):

    def __init__(self,
                 dilation_rate: int,
                 nb_filters: int,
                 kernel_size: int,
                 padding: str,
                 activation: str = 'relu',
                 dropout_rate: float = 0,
                 kernel_initializer: str = 'he_normal',
                 use_batch_norm: bool = False,
                 use_layer_norm: bool = False,
                 **kwargs):
"""Defines the residual block for the WaveNet TCN,#为TCN网络定义残差块
Args:#命令行参数个数
x: The previous layer in the model模型中的上一层
training: boolean indicating whether the layer should behave in training mode or in inference mode指示图层在训练模式或推理模式下的行为是具有布尔值
dilation_rate: The dilation power of 2 we are using for this residual block我们用于此残余块的2的膨胀率
nb_filters: The number of convolutional filters to use in this block要在此块中使用的卷积滤波器的数量
kernel_size: The size of the convolutional kernel卷积核的大小
padding: The padding used in the convolutional layers, 'same' or 'causal'. 填充:卷积层中使用的填充,"相同"或"因果"。
activation: The final activation used in o = Activation(x + F(x))
dropout_rate: Float between 0 and 1. Fraction of the input units to drop.浮动在0到1之间。要丢弃的输入单位的分数。
kernel_initializer: Initializer for the kernel weights matrix (Conv1D).内核权重矩阵 (Conv1D) 的初始值设定项。
use_batch_norm: Whether to use batch normalization in the residual layers or not.是否在残差图层中使用批量归一化。
use_layer_norm: Whether to use layer normalization in the residual layers or not.是否在残差图层中使用图层归一化。
kwargs: Any initializers for Layer class.Layer 类的任何初始值设定项。
"""
        self.dilation_rate = dilation_rate
        self.nb_filters = nb_filters  # number of convolutional filters (output channels)
        self.kernel_size = kernel_size
        self.padding = padding
        self.activation = activation
        self.dropout_rate = dropout_rate
        self.use_batch_norm = use_batch_norm
        self.use_layer_norm = use_layer_norm
        self.kernel_initializer = kernel_initializer
        self.layers = []
        self.layers_outputs = []
        self.shape_match_conv = None
        self.res_output_shape = None
        self.final_activation = None
        super(ResidualBlock, self).__init__(**kwargs)
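
    # Usage sketch (hypothetical values, not from the original source):
    #   block = ResidualBlock(dilation_rate=4, nb_filters=64,
    #                         kernel_size=3, padding='causal', dropout_rate=0.1)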

    def _add_and_activate_layer(self, layer):
        """Helper that makes adding layers to the block easy.

        Appends the layer to the internal layer list, builds it against the current
        output shape of the ResidualBlock, and updates that output shape.

        Args:
            layer: The Keras layer to append and build.
        """
        self.layers.append(layer)  # register the configured layer with the block
        self.layers[-1].build(self.res_output_shape)
        self.res_output_shape = self.layers[-1].compute_output_shape(self.res_output_shape)

    def build(self, input_shape):
        # build() creates the block's weights; their shapes depend on input_shape.
        with K.name_scope(self.name):  # name scope used to make sure weights get unique names
            self.layers = []
            self.res_output_shape = input_shape

            # Two stacked dilated causal convolution sub-blocks.
            for k in range(2):
                name = 'conv1D_{}'.format(k)  # names the dilated convolutions conv1D_0 and conv1D_1
                with K.name_scope(name):  # name scope used to make sure weights get unique names
                    self._add_and_activate_layer(Conv1D(filters=self.nb_filters,
                                                        kernel_size=self.kernel_size,
                                                        dilation_rate=self.dilation_rate,
                                                        padding=self.padding,
                                                        name=name,
                                                        kernel_initializer=self.kernel_initializer))
                with K.name_scope('norm_{}'.format(k)):  # optional normalization
                    if self.use_batch_norm:
                        self._add_and_activate_layer(BatchNormalization())
                    elif self.use_layer_norm:
                        self._add_and_activate_layer(LayerNormalization())
                self._add_and_activate_layer(Activation('relu'))  # activation
                self._add_and_activate_layer(SpatialDropout1D(rate=self.dropout_rate))

            if self.nb_filters != input_shape[-1]:
                # 1x1 conv to match the shapes (channel dimension) before the residual addition.
                name = 'matching_conv1D'
                with K.name_scope(name):
                    # make and build this layer separately because it directly uses input_shape
                    self.shape_match_conv = Conv1D(filters=self.nb_filters,
                                                   kernel_size=1,
                                                   padding='same',  # 'same' keeps the output length equal to the input length
                                                   name=name,
                                                   kernel_initializer=self.kernel_initializer)
            else:
                name = 'matching_identity'
                self.shape_match_conv = Lambda(lambda x: x, name=name)  # identity mapping

            with K.name_scope(name):
                self.shape_match_conv.build(input_shape)
                # The block's output shape is the output shape of the matching convolution.
                self.res_output_shape = self.shape_match_conv.compute_output_shape(input_shape)

            self.final_activation = Activation(self.activation)  # final activation layer
            self.final_activation.build(self.res_output_shape)  # probably isn't necessary

            # this is done to force Keras to add the layers in the list to self._layers
            for layer in self.layers:
                self.__setattr__(layer.name, layer)
            self.__setattr__(self.shape_match_conv.name, self.shape_match_conv)
            self.__setattr__(self.final_activation.name, self.final_activation)

            super(ResidualBlock, self).build(input_shape)  # done to make sure self.built is set True

    def call(self, inputs, training=None):
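        # The original method body was truncated here. What follows is a minimal
        # sketch of the residual forward pass, o = Activation(x + F(x)), matching
        # the layer stack assembled in build() above (assumption: it mirrors the
        # reference WaveNet TCN implementation this file follows).
        x = inputs
        self.layers_outputs = [x]
        for layer in self.layers:
            # Pass `training` only to layers whose call() accepts it
            # (e.g. SpatialDropout1D, BatchNormalization).
            training_flag = 'training' in dict(inspect.signature(layer.call).parameters)
            x = layer(x, training=training) if training_flag else layer(x)
            self.layers_outputs.append(x)
        # Match the channel dimension of the input before the residual addition.
        x2 = self.shape_match_conv(inputs)
        self.layers_outputs.append(x2)
        res_x = layers.add([x2, x])
        self.layers_outputs.append(res_x)
        res_act_x = self.final_activation(res_x)
        self.layers_outputs.append(res_act_x)
        # Return both the activated residual output and the skip connection.
        return [res_act_x, x]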