from skimage import io, transform
from tensorflow.keras.models import Sequential
from matplotlib import pyplot as plt
from tensorflow.keras import losses, optimizers
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPooling2D, Dropout, Flatten, Dense
from sklearn.metrics import classification_report, confusion_matrix, recall_score
from tensorflow.keras import Model, layers
from tensorflow.keras import regularizers
from sklearn.svm import SVC
# from rvm1 import RVC  # only needed for the commented-out RVM experiment at the bottom
import glob
import os
import numpy as np
import random
import time
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
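# Pipeline: load grayscale images from class subfolders, train a small CNN classifier,
# then reuse the activations of its 512-unit Dense layer as features for an SVM.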
path = 'E:/hui/'
# Target size for resizing images (the resize call in read_img is currently commented out)
w = 70
h = 70
# Read the images: each subdirectory of `path` is treated as one class
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        for im in glob.glob(folder + '/*.jpg'):
            img = io.imread(im)
            # img = transform.resize(img, (w, h))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
data, label = read_img(path)
N = len(label)
print(N)
# Shuffle the samples
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr] / 255  # normalize pixel values to [0, 1]
label = label[arr]
# Split the data into a training set and a test set
ratio = 0.8
s = int(num_example * ratio)  # np.int is removed in recent NumPy; use the builtin int
x_train = data[:s]
y_train = label[:s]
x_test = data[s:]
y_test = label[s:]
x_train = np.expand_dims(x_train, axis=-1)  # add a channel axis for single-channel (grayscale) input
x_test = np.expand_dims(x_test, axis=-1)
print(x_train.shape)
print(x_test.shape)
print(y_train.shape)
print(y_test.shape)
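# CNN: four Conv2D + BatchNormalization + ReLU blocks with max pooling and dropout,
# followed by a 512-unit Dense layer and a 4-class softmax output.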
model = Sequential()
model.add(Conv2D(96, (3, 3), padding='same', input_shape=(64, 64, 1)))  # input_shape must match the actual image size, since the resize in read_img is disabled
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(256, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Conv2D(256, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.3))
model.add(Dense(4, activation='softmax'))
model.summary()
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), optimizer='adam', metrics=['accuracy'])  # from_logits=False because the output layer already applies softmax
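# Log training metrics for TensorBoard (view with: tensorboard --logdir logs)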
tb_cb = TensorBoard(log_dir='logs')
history = model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1, callbacks=[tb_cb])
score = model.evaluate(x_test, y_test)
print("测试集上的损失:", score[0])
print("测试集上的准确度:", score[1])
# Plot accuracy and loss curves for the training and validation sets
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
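# Use the 512-unit Dense layer as a fixed feature extractor for the SVM.
# 'dense' is the default Keras name of that layer in a fresh session; if the lookup
# fails, give the layer an explicit name when building the model.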
model_feat = Model(inputs=model.input, outputs=model.get_layer('dense').output)
feat_train = model_feat.predict(x_train)
print(feat_train.shape)
feat_test = model_feat.predict(x_test)
print(feat_test.shape)
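# Train an RBF-kernel SVM on the CNN features and report train/test accuracy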
svm = SVC(kernel='rbf', C=10, degree=4, coef0=1.0, gamma='auto')  # degree and coef0 are ignored by the RBF kernel
svm.fit(feat_train, y_train)
print('fitting done !!!')
print(svm.score(feat_train, y_train))
print(svm.score(feat_test, y_test))
# Print the confusion matrix on the test set
predictions = svm.predict(feat_test)
cm = confusion_matrix(y_test, predictions)
print(cm)
# Plot the confusion matrix
print('Predicted labels:', predictions)
print('True labels:', y_test)
classes = list(set(y_test))
# Sort the class labels so the tick labels line up with the matrix indices
classes.sort()
# Compute the confusion matrix (sklearn convention: rows = true labels, columns = predicted labels)
confusion = confusion_matrix(y_test, predictions)
# Heat map of the matrix; any colormap works (Blues, gray, gray_r, ...)
plt.imshow(confusion, cmap=plt.cm.Blues)
# ticks are the positions on the axes, labels are the class names shown at those positions
indices = range(len(confusion))
plt.xticks(indices, classes)
plt.yticks(indices, classes)
# Color scale bar
plt.colorbar()
# Axis labels
plt.xlabel('Predicted label')
plt.ylabel('True label')
# Write the count into each cell for readability
for first_index in range(len(confusion)):
    for second_index in range(len(confusion[first_index])):
        # imshow puts columns on the x axis and rows on the y axis
        plt.text(second_index, first_index, confusion[first_index][second_index])
# Show the figure
plt.show()
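# Optional: the same CNN features could be fed to an RVM classifier instead
# (uncomment below; requires the external rvm1 package that provides RVC)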
# rvm = RVC(kernel='rbf')
# rvm.fit(feat_train, y_train)
# print('fitting done !!!')
#
# print(rvm.score(feat_train, y_train))
# print(rvm.score(feat_test, y_test))
# def ExtractFeatures_CNN(I):
#     model_feat = Model(inputs=model.input, outputs=model.get_layer('dense').output)
#     return model_feat.predict(I)
#
#
# I = io.imread('E:/hui/normal/array_1.jpg')
# I = np.expand_dims(I / 255, axis=(0, -1))  # normalize and add batch/channel axes, as in training
# f = ExtractFeatures_CNN(I)
# print("PREDICTED")
# print(svm.predict(f))  # svm.predict already returns the predicted class index