import tensorflow as tf
import glob
import numpy as np
# (console output captured from the original notebook run, kept for reference)
# /anaconda3/envs/py35/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning:
#   compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util'
#   does not match runtime version 3.5
# /anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning:
#   Conversion of the second argument of issubdtype from `float` to `np.floating`
#   is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
image_filenames = glob.glob('./dc/train/*.jpg')
image_filenames[0]
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-1aa03117bdd7> in <module>()
----> 1 image_filenames[0]
IndexError: list index out of range
lables = list(map(lambda x : x.split('\\')[1].split('.')[0], image_filenames))
train_lable = [[1, 0] if x == 'cat' else [0, 1] for x in lables]
# Per-example input pipeline: (filename, label) queue -> JPEG decode ->
# grayscale -> resize to 200x200 -> per-image standardization.
image_que = tf.train.slice_input_producer([image_filenames, train_lable])
image_ = tf.read_file(image_que[0])
image = tf.image.decode_jpeg(image_, channels=3)
grey_image = tf.image.rgb_to_grayscale(image)
# BUG FIX: the original called tf.image_resize_images, which does not exist
# (AttributeError at graph-build time); the correct API is
# tf.image.resize_images.
resize_image = tf.image.resize_images(grey_image, (200, 200))
# resize_images loses the static shape; pin it so tf.train.batch can infer
# the batch element shape.
resize_image = tf.reshape(resize_image, [200, 200, 1])
new_img = tf.image.per_image_standardization(resize_image)
# Batch the standardized images and their labels. Queue capacity follows
# the usual "headroom + 2 * batch" rule of thumb.
batch_size = 60
capacity = 2 * batch_size + 10
image_batch, lable_batch = tf.train.batch(
    [new_img, image_que[1]],
    batch_size=batch_size,
    capacity=capacity)
# Notebook-style inspection of the label batch's static shape.
lable_batch.get_shape()
# Conv block 1: 5x5 kernels, 32 feature maps, ReLU, stride 1, SAME padding.
conv2d_1 = tf.contrib.layers.convolution2d(
    image_batch,
    num_outputs=32,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
    trainable=True)
# 3x3 max pool, stride 2: halves the spatial resolution (200 -> 100).
pool_1 = tf.nn.max_pool(
    conv2d_1,
    ksize=[1, 3, 3, 1],
    strides=[1, 2, 2, 1],
    padding='SAME')
# Conv block 2: same structure, 32 feature maps.
# NOTE(review): stddev here is 0.01 while block 1 uses 0.001 — presumably
# intentional, but worth confirming.
conv2d_2 = tf.contrib.layers.convolution2d(
    pool_1,
    num_outputs=32,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    trainable=True)
# 3x3 max pool, stride 2: 100 -> 50.
pool_2 = tf.nn.max_pool(
    conv2d_2,
    ksize=[1, 3, 3, 1],
    strides=[1, 2, 2, 1],
    padding='SAME')
# Conv block 3: widen to 64 feature maps.
conv2d_3 = tf.contrib.layers.convolution2d(
    pool_2,
    num_outputs=64,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    trainable=True)
# 3x3 max pool, stride 2: 50 -> 25.
pool_3 = tf.nn.max_pool(
    conv2d_3,
    ksize=[1, 3, 3, 1],
    strides=[1, 2, 2, 1],
    padding='SAME')
# Notebook-style shape inspection.
pool_3.get_shape()
# After three stride-2 pools: 200 -> 100 -> 50 -> 25, so 25*25*64 features.
pool3_flat = tf.reshape(pool_3, [-1, 25 * 25 * 64])
# Fully connected head: 1024 then 192 hidden units, both ReLU.
fc_1 = tf.contrib.layers.fully_connected(
    pool3_flat,
    1024,
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.1))
fc_2 = tf.contrib.layers.fully_connected(
    fc_1,
    192,
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.1))
# Final linear layer producing 2 logits (cat vs dog, one-hot).
out_wl = tf.Variable(tf.truncated_normal([192, 2]))
out_bl = tf.Variable(tf.truncated_normal([2]))
comb_out = tf.matmul(fc_2, out_wl) + out_bl
# Independent sigmoid per output unit.
pred = tf.sigmoid(comb_out)
# Notebook-style shape inspections.
pred.get_shape()
lable_batch.get_shape()
# Labels arrive as int one-hot vectors; cast once for the float ops below.
lable_batch = tf.cast(lable_batch, tf.float32)
# NOTE(review): sigmoid cross-entropy over a mutually-exclusive 2-way
# one-hot target trains, but softmax_cross_entropy_with_logits is the
# conventional choice for this setup — confirm intent before changing.
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        labels=lable_batch, logits=comb_out))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
# Threshold each sigmoid output at 0.5; accuracy is the mean element-wise
# agreement with the one-hot labels (counts both entries per example).
predicted = tf.cast(pred > 0.5, tf.float32)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(predicted, lable_batch), tf.float32))
saver = tf.train.Saver()
# Train for 3000 steps, logging loss/accuracy and checkpointing every 100.
# (Indentation reconstructed — the original paste flattened this block.)
with tf.Session() as sess:
    # BUG FIX: initialize variables BEFORE starting the queue-runner
    # threads; the original started the runners first, which can raise
    # FailedPreconditionError if the input threads touch uninitialized
    # state before the init op runs.
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        for step in range(0, 3000):
            sess.run(train_step)
            if step % 100 == 0:
                res = sess.run([loss, accuracy])
                print(step, res)
                saver.save(sess, './lesson30', global_step=step)
    finally:
        # Always stop and join the input threads, even if training raises,
        # so the process can exit cleanly.
        coord.request_stop()
        coord.join(threads)
# Restore from checkpoint (original note in Chinese: 恢复检查点)
#import os
#ckpt = tf.train.get_checkpoint_state(os.path.dirname('__file__'))  # fixed typo: was tf.train_get_checkpoint_state
#saver = tf.train.Saver()
#sess = tf.Session()
#sess.run(tf.global_variables_initializer())
#saver.restore(sess, ckpt.model_checkpoint_path)
#coord = tf.train.Coordinator()
#threads = tf.train.start_queue_runners(coord = coord)
# NOTE(review): keep_prob is never defined in this graph — uncommenting the
# feed_dict lines below as-is would raise NameError; this snippet was
# probably copied from a variant of the model that had dropout.
#for step in range(6000, 7000):
#    sess.run(train_step, feed_dict={keep_prob:0.5})
#    if(step %100 == 0):
#        res = sess.run([loss, accuracy], feed_dict={keep_prob:1})
#        print(step, res)
#        saver.save(sess, './lesson30', global_step = step)
#coord.request_stop()
#coord.join(threads)