TensorFlow tf.data one-shot iterator: reading the cats-vs-dogs dataset

245 阅读1分钟
import glob
import os

import numpy as np
import tensorflow as tf
/anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
  from ._conv import register_converters as _register_converters
tf.VERSION
'1.10.0'
# Collect training images and derive binary labels from their filenames
# ("cat.123.jpg" -> 1.0, "dog.456.jpg" -> 0.0), then wrap them in a Dataset.
image_filenames = glob.glob('./dc/train/*.jpg')
image_filenames = np.random.permutation(image_filenames)
# BUG FIX: the original split on '\\', which only works for Windows paths;
# on Linux/macOS glob returns '/'-separated paths and split('\\')[1] raises
# IndexError.  os.path.basename handles both platforms.
labels = list(map(lambda x: float(os.path.basename(x).split('.')[0] == 'cat'),
                  image_filenames))
dataset = tf.data.Dataset.from_tensor_slices((image_filenames, labels))
dataset
def _pre_read(img_filename, lable):
    """Load one JPEG and turn it into a standardized grayscale tensor.

    Reads the file, decodes it as 3-channel JPEG, converts to grayscale,
    resizes to 200x200, and applies per-image standardization.  Returns the
    image with shape [200, 200, 1] and the label reshaped to [1].
    """
    raw = tf.read_file(img_filename)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.image.rgb_to_grayscale(img)
    img = tf.image.resize_images(img, (200, 200))
    img = tf.reshape(img, [200, 200, 1])
    img = tf.image.per_image_standardization(img)
    return img, tf.reshape(lable, [1])
# Build the input pipeline and a one-shot iterator over it.
# BUG FIX: the original called dataset.map(_per_read) -- a NameError; the
# mapping function defined above is named _pre_read.
dataset = dataset.map(_pre_read)
dataset = dataset.shuffle(300)  # shuffle buffer of 300 examples
dataset = dataset.repeat(-1)    # -1: repeat indefinitely
dataset = dataset.batch(64)
dataset
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()  # (image_batch, label_batch) tensor pair
# Conv block 1: 5x5 conv, 32 filters, ReLU, then 3x3 max-pool with stride 2.
conv2d_1 = tf.contrib.layers.convolution2d(
    next_element[0],
    num_outputs=32,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.001),
    trainable=True)
pool_1 = tf.nn.max_pool(conv2d_1,
                        ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')

# Conv block 2: same shape as block 1 (32 filters) with a wider init stddev.
conv2d_2 = tf.contrib.layers.convolution2d(
    pool_1,
    num_outputs=32,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    trainable=True)
pool_2 = tf.nn.max_pool(conv2d_2,
                        ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')

# Conv block 3: widens to 64 filters before the fully-connected head.
conv2d_3 = tf.contrib.layers.convolution2d(
    pool_2,
    num_outputs=64,
    kernel_size=(5, 5),
    stride=(1, 1),
    padding='SAME',
    activation_fn=tf.nn.relu,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    trainable=True)
pool_3 = tf.nn.max_pool(conv2d_3,
                        ksize=[1, 3, 3, 1],
                        strides=[1, 2, 2, 1],
                        padding='SAME')

# Flatten: three stride-2 pools reduce 200x200 to 25x25 with 64 channels.
pool3_flat = tf.reshape(pool_3, [-1, 25 * 25 * 64])

# Two ReLU fully-connected layers: 1024 units, then 192 units.
fc_1 = tf.contrib.layers.fully_connected(
    pool3_flat, 1024,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
    activation_fn=tf.nn.relu)
fc_2 = tf.contrib.layers.fully_connected(
    fc_1, 192,
    weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
    activation_fn=tf.nn.relu)


# Output head: a single logit for binary cat-vs-dog classification.
out_wl = tf.Variable(tf.truncated_normal([192, 1]))
out_bl = tf.Variable(tf.truncated_normal([1]))
comb_out = tf.matmul(fc_2, out_wl) + out_bl
pred = tf.sigmoid(comb_out)

# BUG FIX: the original did `lable_batch = tf.cast(lable_batch, tf.float32)`,
# casting a name that was never defined (NameError).  The label batch comes
# from the iterator, so derive it from next_element[1].
lable_batch = tf.cast(next_element[1], tf.float32)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=lable_batch, logits=comb_out))
train_step = tf.train.AdamOptimizer(0.0001).minimize(loss)
# Threshold the sigmoid output at 0.5 and measure batch accuracy.
predicted = tf.cast(pred > 0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, lable_batch), tf.float32))

# Train for a fixed 3000 steps (valid with repeat(-1): the iterator never
# raises OutOfRangeError), reporting and checkpointing every 10 steps.
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(3000):
        sess.run(train_step)
        if step % 10 == 0:
            metrics = sess.run([loss, accuracy])
            print(step, metrics)
            saver.save(sess, './lesson30', global_step=step)

The case where repeat() is given a finite count instead of -1:

# Training loop for a finite dataset (repeat() with a finite count): run
# until the iterator is exhausted, checkpointing every 100 steps and once
# more at the end.
step = 0
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    while True:
        try:
            sess.run(train_step)
            if step % 100 == 0:
                res = sess.run([loss, accuracy])
                print(step, res)
                saver.save(sess, './lesson30', global_step=step)
            # BUG FIX: the original also incremented `step` at the bottom of
            # the while body, advancing it twice per iteration and breaking
            # both the step%100 cadence and the reported step count.
            step += 1
        except tf.errors.OutOfRangeError:
            # Dataset exhausted: save a final checkpoint and stop.
            saver.save(sess, './lesson30', global_step=step)
            print('done!')
            print(step)
            break