import tensorflow as tf
from show import show_graph
from tensorflow.examples.tutorials.mnist import input_data
# GPU memory management
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # make only the first GPU visible
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use at most 50% of the GPU memory
gpu_config.gpu_options.allow_growth = True                    # allocate memory on demand
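# A ConfigProto only takes effect when handed to a session; a minimal sketch, purely
# for illustration, since the contrib.learn estimator below manages its own sessions:
with tf.Session(config=gpu_config) as sess:
    pass  # any graph run inside this block would respect the GPU options above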
mnist = input_data.read_data_sets("./datasets/mnist/")  # downloads MNIST here on first use
X_train = mnist.train.images
X_test = mnist.test.images
y_train = mnist.train.labels.astype("int")
y_test = mnist.test.labels.astype("int")
X_train.shape  # (55000, 784): 55,000 training images, 784 = 28x28 pixels each
y_train.shape  # (55000,)
# Fix the random seed for reproducibility
config = tf.contrib.learn.RunConfig(tf_random_seed=42)
# Infer the feature columns from the training data
feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(X_train)
feature_cols  # one real-valued column covering all 784 pixel features
# Build the DNN classifier:
# ... two hidden layers with 300 and 100 neurons respectively
# ... 10 output classes, the inferred feature columns, and the config (random seed) above
dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[300, 100], n_classes=10,
                                         feature_columns=feature_cols, config=config)
dnn_clf = tf.contrib.learn.SKCompat(dnn_clf)  # if TensorFlow >= 1.1
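# For reference, a hedged sketch of the fully connected stack DNNClassifier builds
# (two ReLU hidden layers, then a 10-unit output layer). It is built in a throwaway
# graph so it does not touch the estimator's own graph, and it omits the loss and
# optimizer the estimator adds internally:
with tf.Graph().as_default():
    X_sketch = tf.placeholder(tf.float32, shape=(None, 784))
    hidden1 = tf.layers.dense(X_sketch, 300, activation=tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, 100, activation=tf.nn.relu)
    logits = tf.layers.dense(hidden2, 10)  # unscaled class scores; softmax is applied in the loss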
# Feed in the data
# ... batch size 50, 40,000 training steps
dnn_clf.fit(X_train, y_train, batch_size=50, steps=40000)
1. Accuracy
from sklearn.metrics import accuracy_score
y_pred = dnn_clf.predict(X_test)  # SKCompat returns a dict with 'classes' and 'probabilities'
accuracy_score(y_test, y_pred['classes'])
2. Log loss
from sklearn.metrics import log_loss
y_pred_proba = y_pred['probabilities']  # per-class predicted probabilities
log_loss(y_test, y_pred_proba)
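# Log loss is the mean negative log-probability assigned to the true class, so it
# penalizes confident wrong predictions. A minimal NumPy sketch equivalent to the
# call above (up to sklearn's clipping of probabilities away from 0):
import numpy as np
-np.mean(np.log(y_pred_proba[np.arange(len(y_test)), y_test]))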