import requests
import numpy as np
r = requests.get('http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
with open('iris.data', 'w') as f:
    f.write(r.text)
import pandas as pd
data = pd.read_csv('iris.data', names=['e_cd', 'e_kd', 'b_cd', 'b_kd', 'cat'])  # sepal length/width, petal length/width, class label
data.head(5)
|   | e_cd | e_kd | b_cd | b_kd | cat |
|---|------|------|------|------|-----|
| 0 | 5.1 | 3.5 | 1.4 | 0.2 | Iris-setosa |
| 1 | 4.9 | 3.0 | 1.4 | 0.2 | Iris-setosa |
| 2 | 4.7 | 3.2 | 1.3 | 0.2 | Iris-setosa |
| 3 | 4.6 | 3.1 | 1.5 | 0.2 | Iris-setosa |
| 4 | 5.0 | 3.6 | 1.4 | 0.2 | Iris-setosa |
data.cat.unique()
array(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'], dtype=object)
data['c1'] = np.array(data['cat'] == 'Iris-setosa').astype(np.float32)
data['c2'] = np.array(data['cat'] == 'Iris-versicolor').astype(np.float32)
data['c3'] = np.array(data['cat'] == 'Iris-virginica').astype(np.float32)
target = np.stack([data.c1.values, data.c2.values, data.c3.values]).T
shuju = np.stack([data.e_cd.values, data.e_kd.values, data.b_cd.values, data.b_kd.values]).T
np.shape(shuju), np.shape(target)
((150, 4), (150, 3))
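The three indicator columns c1, c2, c3 are built with one comparison per class. As a side note (not part of the original code), pandas' get_dummies can produce the same (150, 3) one-hot matrix in a single call; because it sorts the category names, the column order matches c1, c2, c3. The names onehot and target_alt below are illustrative:
# Alternative sketch: one-hot encode the class column in one call.
onehot = pd.get_dummies(data['cat'])              # columns: Iris-setosa, Iris-versicolor, Iris-virginica
target_alt = onehot.values.astype(np.float32)     # same values as target, shape (150, 3)
print(np.array_equal(target_alt, target))         # should print True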
import tensorflow as tf
/anaconda3/envs/py35/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: compiletime version 3.6 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.5
return f(*args, **kwds)
/anaconda3/envs/py35/lib/python3.5/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
x = tf.placeholder("float", shape=[None, 4])   # four input features
y = tf.placeholder("float", shape=[None, 3])   # one-hot class label
weight = tf.Variable(tf.truncated_normal([4, 3]))
bias = tf.Variable(tf.truncated_normal([3]))
combine_input = tf.matmul(x, weight) + bias    # logits, shape (None, 3)
pred = tf.nn.softmax(combine_input)            # class probabilities
y.get_shape(), pred.get_shape()
(TensorShape([Dimension(None), Dimension(3)]),
TensorShape([Dimension(None), Dimension(3)]))
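Both tensors are (None, 3): tf.nn.softmax turns each row of combine_input into a probability distribution over the three classes. For intuition, a small NumPy sketch of the same mapping (illustrative only, not part of the graph):
def softmax_np(z):
    # subtract the row maximum for numerical stability, then normalize the exponentials
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

print(softmax_np(np.array([[2.0, 1.0, 0.1]])))    # rows sum to 1, e.g. [[0.659 0.242 0.099]]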
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=combine_input))
WARNING:tensorflow:From <ipython-input-18-6b11325178b9>:1: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Future major versions of TensorFlow will allow gradients to flow
into the labels input on backprop by default.
See tf.nn.softmax_cross_entropy_with_logits_v2.
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
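correct_pred compares the index of the largest predicted probability with the index of the 1 in the one-hot label, and accuracy is the mean of those matches. A NumPy equivalent, for reference only (accuracy_np is an illustrative name, not used by the graph):
def accuracy_np(probs, onehot_labels):
    # fraction of rows whose predicted class index matches the labelled one
    return np.mean(np.argmax(probs, axis=1) == np.argmax(onehot_labels, axis=1))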
train_step = tf.train.AdamOptimizer(0.0005).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for i in range(10000):
    # shuffle the 150 samples, then take one full-batch Adam step
    index = np.random.permutation(len(target))
    shuju = shuju[index]
    target = target[index]
    sess.run(train_step, feed_dict={x: shuju, y: target})
    if i % 1000 == 0:
        print(sess.run((loss, accuracy), feed_dict={x: shuju, y: target}))
(0.48896936, 0.74)
(0.3105031, 0.91333336)
(0.23715515, 0.9533333)
(0.18639529, 0.96666664)
(0.15062416, 0.96666664)
(0.12501644, 0.98)
(0.10630642, 0.98)
(0.09238311, 0.98)
(0.08186688, 0.98)
(0.07382768, 0.98)
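The loss falls steadily and the training accuracy settles around 0.98. The trained session can now score new measurements; a hypothetical usage example (new_flower reuses the feature values from row 0 of the table above):
new_flower = np.array([[5.1, 3.5, 1.4, 0.2]], dtype=np.float32)   # sepal length/width, petal length/width
probs = sess.run(pred, feed_dict={x: new_flower})
print(probs)                           # probabilities for the three classes
print(np.argmax(probs, axis=1))        # 0 = Iris-setosa, 1 = Iris-versicolor, 2 = Iris-virginica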
print(sess.run(weight))
[[ 1.319767 1.5383282 -1.6495701 ]
[ 4.432047 0.22605467 -3.0211918 ]
[-4.785147 -1.7326641 3.8946218 ]
[-4.521448 -2.30758 3.069917 ]]
print(sess.run(bias))
[ 1.521937 2.874068 -4.503109]
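As a sanity check (not in the original), the printed weight and bias reproduce the graph's predictions outside TensorFlow: features times weight plus bias, followed by a softmax, should match sess.run(pred, ...). The names W, b, logits_np and probs_np are illustrative:
W, b = sess.run(weight), sess.run(bias)
logits_np = shuju.astype(np.float32) @ W + b
logits_np -= logits_np.max(axis=1, keepdims=True)   # stabilize before exponentiating
probs_np = np.exp(logits_np) / np.exp(logits_np).sum(axis=1, keepdims=True)
print(np.allclose(probs_np, sess.run(pred, feed_dict={x: shuju}), atol=1e-4))   # should print True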