Official code: https://github.com/MorvanZhou/Tensorflow-Tutorial/blob/master/tutorial-contents/302_simple_classification.py
[Figure: the generated data before training]
[Figure: the data after training (i.e. after classification)]
Detailed code walkthrough
"""
Know more, visit my Python tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
Dependencies:
tensorflow: 1.1.0
matplotlib
numpy
"""
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
tf.set_random_seed(1)  # fix TensorFlow's random seed for reproducibility
np.random.seed(1)
# fake data
n_data = np.ones((100, 2))  # a 100x2 matrix of ones, used as the mean when sampling each class
x0 = np.random.normal(2*n_data, 1) # class0 x shape=(100, 2)
y0 = np.zeros(100)  # class0 y shape=(100, ); a 1-D array of 100 zeros, the labels for class 0
x1 = np.random.normal(-2*n_data, 1) # class1 x shape=(100, 2)
y1 = np.ones(100) # class1 y shape=(100, )
x = np.vstack((x0, x1))  # shape (200, 2) + some noise; np.vstack stacks the two sample arrays vertically (row-wise)
y = np.hstack((y0, y1))  # shape (200, ); np.hstack concatenates the two label vectors horizontally
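# (Clarifying sketch, not in the original code) after stacking, the layout is:
#   x.shape == (200, 2)   rows 0..99   -> class 0, sampled around (+2, +2)
#                         rows 100..199 -> class 1, sampled around (-2, -2)
#   y.shape == (200,)     first 100 labels are 0, last 100 are 1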
# plot data
plt.scatter(x[:, 0], x[:, 1], c=y, s=100, lw=0, cmap='RdYlGn')  # scatter plot of the raw data, coloured by class label
plt.show()
tf_x = tf.placeholder(tf.float32, x.shape)     # input x; a placeholder only reserves space, so values must be fed later, e.g. sess.run(..., feed_dict={tf_x: x, tf_y: y})
tf_y = tf.placeholder(tf.int32, y.shape) # input y
# neural network layers
l1 = tf.layers.dense(tf_x, 10, tf.nn.relu)          # hidden layer; tf.layers.dense adds a fully connected layer (equivalent to a hand-written add_layer())
output = tf.layers.dense(l1, 2) # output layer
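# (Note added for clarity, hedged) tf.layers.dense creates the weight and bias variables
# internally: l1 uses a (2, 10) weight matrix followed by ReLU, and `output` uses a
# (10, 2) weight matrix, producing one raw logit per class, i.e. logits of shape (200, 2).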
# the returned loss is a weighted Tensor of the same type as logits; if reduction is NONE it has the same shape as labels, otherwise it is a scalar
loss = tf.losses.sparse_softmax_cross_entropy(labels=tf_y, logits=output) # compute cost
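# (Sketch for intuition only, not part of the original) with integer labels this loss is the
# mean negative log-probability of the true class; a manual equivalent would be roughly:
#   log_p = tf.nn.log_softmax(output)                               # shape (200, 2)
#   idx = tf.stack([tf.range(tf.shape(tf_y)[0]), tf_y], axis=1)     # (row, label) index pairs
#   loss_manual = tf.reduce_mean(-tf.gather_nd(log_p, idx))         # same value as `loss`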
accuracy = tf.metrics.accuracy(      # returns (acc, update_op) and creates 2 local variables; an evaluation-metric op
    # for tf.argmax, see https://blog.csdn.net/qq575379110/article/details/70538051
    labels=tf.squeeze(tf_y), predictions=tf.argmax(output, axis=1),)[1]
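# (Note added for clarity) tf.metrics.accuracy creates two local variables (total, count)
# that accumulate correct/total counts across sess.run calls; indexing [1] keeps the
# update_op, which refreshes those counters and returns the running accuracy. This is
# why tf.local_variables_initializer() is grouped into init_op below.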
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05)  # gradient descent optimizer that backpropagates the error
train_op = optimizer.minimize(loss)
sess = tf.Session() # control training and others
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())  # group several TensorFlow initialization ops into one op
sess.run(init_op) # initialize var in graph
plt.ion()   # turn on matplotlib interactive mode so the plot can be updated during training
for step in range(100):
    # train and net output
    _, acc, pred = sess.run([train_op, accuracy, output], {tf_x: x, tf_y: y})
    if step % 2 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x[:, 0], x[:, 1], c=pred.argmax(1), s=100, lw=0, cmap='RdYlGn')
        plt.text(1.5, -4, 'Accuracy=%.2f' % acc, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()
plt.show()
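A short follow-up sketch (not in the original script; the names final_labels and final_acc are introduced here purely for illustration): after the loop finishes, pred holds the logits from the last training step, so the final class assignments and a plain NumPy accuracy check can be computed directly.
final_labels = pred.argmax(axis=1)          # predicted class (0 or 1) for each sample
final_acc = (final_labels == y).mean()      # fraction of correctly classified points
print('final accuracy: %.2f' % final_acc)
sess.close()                                # release the session once training is done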