import glob
import os

import numpy as np
import tensorflow as tf
# NOTE(review): the lines below are captured notebook/REPL output, not source
# code; commented out so the file parses.
# /anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
#   from ._conv import register_converters as _register_converters
# tf.VERSION  ->  '1.10.0'  (TensorFlow version this script was written against)
image_filenames = glob.glob('./dc/train/*.jpg')
image_filenames = np.random.permutation(image_filenames)
lables = list(map(lambda x: float(x.split('\\')[1].split('.')[0] == 'cat'), image_filenames))
train_lables = tf.data.Dataset.from_tensor_slices(train_lables)
train_lables
train_dataset = tf.data.Dataset.from_tensor_slices(image_filenames)
def _pre_read(img_filename): image = tf.read_file(img_filename) image = tf.image.decode_jpeg(image, channels=3) image = tf.image.rgb_to_grayscale(image) image = tf.image.resize_images(image, (200, 200)) image = tf.reshape(image, [200, 200, 1]) image = tf.image.per_image_standardization(image) return image
train_dataset = train_dataset.map(_pre_read)
train_lable_dataset = tf.data.Dataset.zip((train_dataset, train_lables))
train_lable_dataset
train_lable_dataset = dataset.shuffle(300)
train_lable_dataset = dataset.repeat(5)
train_lable_dataset = dataset.batch(64)
train_lable_dataset
image_filenames_test = glob.glob('./dc/test/*.jpg')
test_dataset = tf.data.Dataset.from_tensor_slices(image_filenames_test)
test_dataset = test_dataset.map(_pre_read)
test_lables = tf.data.Dataset.from_tensor_slices(
np.random.randint(0,1, len(image_filenames_test)).astype(np.float32).reshape(-1,1))
test_lable_dataset = tf.data.Dataset.zip((test_dataset, test_lables))
test_lable_dataset = test_lable_dataset.shuffle(60)
test_lable_dataset = test_lable_dataset.repeat(1)
test_lable_dataset = test_lable_dataset.batch(20)
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, test_lable_dataset.output_types, test_lable_dataset.output_shapes)
next_element = iterator.get_next()
training_iterator = train_lable_dataset.make_one_shot_iterator()
test_iterator = test_dataset.make_one_shot_iterator()
conv2d_1 = tf.contrib.layers.convolution2d(
next_element[0],
num_outputs=32,
weights_initializer = tf.truncated_normal_initializer(stddev=0.001),
kernel_size = (5,5),
activation_fn = tf.nn.relu,
stride = (1,1),
padding = 'SAME',
trainable = True)
pool_1 = tf.nn.max_pool(conv2d_1,
ksize = [1,3,3,1],
strides = [1,2,2,1],
padding='SAME')
conv2d_2 = tf.contrib.layers.convolution2d(
pool_1,
num_outputs=32,
weights_initializer = tf.truncated_normal_initializer(stddev=0.01),
kernel_size = (5,5),
activation_fn = tf.nn.relu,
stride = (1,1),
padding = 'SAME',
trainable = True)
pool_2 = tf.nn.max_pool(conv2d_2,
ksize = [1,3,3,1],
strides = [1,2,2,1],
padding='SAME')
conv2d_3 = tf.contrib.layers.convolution2d(
pool_2,
num_outputs=64,
weights_initializer = tf.truncated_normal_initializer(stddev=0.01),
kernel_size = (5,5),
activation_fn = tf.nn.relu,
stride = (1,1),
padding = 'SAME',
trainable = True)
pool_3 = tf.nn.max_pool(conv2d_3,
ksize = [1,3,3,1],
strides = [1,2,2,1],
padding='SAME')
pool3_flat = tf.reshape(pool_3, [-1, 25*25*64])
fc_1 = tf.contrib.layers.fully_connected(
pool3_flat,
1024,
weights_initializer = tf.truncated_normal_initializer(stddev=0.1),
activation_fn = tf.nn.relu)
fc_2 = tf.contrib.layers.fully_connected(
fc_1,
192,
weights_initializer = tf.truncated_normal_initializer(stddev=0.1),
activation_fn = tf.nn.relu)
out_wl = tf.Variable(tf.truncated_normal([192, 1]))
out_bl = tf.Variable(tf.truncated_normal([1]))
comb_out = tf.matmul(fc_2, out_wl) + out_bl
pred = tf.sigmoid(comb_out)
lable_batch = tf.cast(lable_batch, tf.float32)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = next_element[1], logits = comb_out))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
predicted = tf.cast(pred >0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, lable_batch), tf.float32))
step = 0
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
training_handle = sess.run(training_iterator.string_handle())
test_handle = sess.run(test_iterator.string_handle())
for i in ranges(2000):
sess.run(train_step, feed_dict = {handle : training_handle})
if(step %100 == 0):
res = sess.run([loss, accuracy], feed_dict = {handle : training_handle})
print(step, res)
saver.save(sess, './lesson30', global_step = step)
step += 1
accuracy_rate = []
while True:
try:
acc_rate = sess.run(accuracy, feed_dice = {handle: test_handle})
accuracy_rate.append(acc_rate)
except tf.errors.OutOfRangeError:
print(np.mean(accuracy_rate))