- Linear Regression
$h_\theta(x) = \theta_0 + \theta_1 x_1 + \theta_2 x_2 + \dots + \theta_n x_n$

This formula has $n+1$ parameters and $n$ variables. To simplify it, introduce $x_0 = 1$, so it becomes $h_\theta(x) = \theta_0 x_0 + \theta_1 x_1 + \theta_2 x_2 + \dots + \theta_n x_n$. The model's parameters are then an $(n+1)$-dimensional vector, every training instance is also an $(n+1)$-dimensional vector, and the feature matrix $X$ has dimensions $m \times (n+1)$. The formula can therefore be written as $h_\theta(x) = \theta^T X$, where $T$ denotes the matrix transpose.
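A minimal NumPy sketch of this vectorization (the array names, sizes, and values below are illustrative, not from the original notes): prepend the $x_0 = 1$ column to the feature matrix and compute the hypothesis for all $m$ examples at once. The `loss` in the TensorFlow code below computes the corresponding cost $\frac{1}{2m}(X\theta - y)^T(X\theta - y)$.

```python
import numpy as np

# Illustrative sizes: m training examples, n features.
m, n = 4, 2
X_raw = np.random.rand(m, n)              # raw m*n feature matrix
X = np.hstack([np.ones((m, 1)), X_raw])   # prepend x_0 = 1 -> m*(n+1)
theta = np.zeros((n + 1, 1))              # (n+1)*1 parameter column vector

h = X @ theta                              # vectorized hypothesis for all m examples, m*1
```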
```python
import numpy as np
import tensorflow as tf


def linear_regression(X_data, y_data, alpha, epoch, optimizer=tf.train.GradientDescentOptimizer):
    # This function was written by Lucas Shen, a developer based in San Francisco.
    # placeholder for graph input
    X = tf.placeholder(tf.float32, shape=X_data.shape)
    y = tf.placeholder(tf.float32, shape=y_data.shape)

    # construct the graph
    with tf.variable_scope('linear-regression'):
        W = tf.get_variable("weights",
                            (X_data.shape[1], 1),
                            initializer=tf.constant_initializer())  # n*1

        y_pred = tf.matmul(X, W)  # m*n @ n*1 -> m*1

        # squared-error cost: 1/(2m) * (y_pred - y).T @ (y_pred - y) -> 1*1
        loss = 1 / (2 * len(X_data)) * tf.matmul((y_pred - y), (y_pred - y), transpose_a=True)

    opt = optimizer(learning_rate=alpha)
    opt_operation = opt.minimize(loss)

    # run the session
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        loss_data = []

        for i in range(epoch):
            _, loss_val, W_val = sess.run([opt_operation, loss, W],
                                          feed_dict={X: X_data, y: y_data})
            loss_data.append(loss_val[0, 0])  # every loss_val is a 1*1 ndarray

            # early break when the loss has converged
            if len(loss_data) > 1 and np.abs(loss_data[-1] - loss_data[-2]) < 10 ** -9:
                # print('Converged at epoch {}'.format(i))
                break

    # clear the graph so the function can be called repeatedly
    tf.reset_default_graph()
    return {'loss': loss_data, 'parameters': W_val}  # W_val is the n*1 weight column vector
```
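The function above targets the TensorFlow 1.x graph-mode API (`tf.placeholder`, `tf.Session`, `tf.train.GradientDescentOptimizer`); under TensorFlow 2 it would need `tf.compat.v1` with eager execution disabled. A hedged usage sketch on synthetic data (the data, learning rate, and epoch count are made up for illustration, not taken from the original notes):

```python
import numpy as np

# Synthetic data: y = 3 + 2*x, with the bias column x_0 = 1 already prepended.
m = 100
x = np.random.rand(m, 1)
X_data = np.hstack([np.ones((m, 1)), x]).astype(np.float32)  # m*(n+1)
y_data = (3 + 2 * x).astype(np.float32)                      # m*1

result = linear_regression(X_data, y_data, alpha=0.1, epoch=5000)
print('final loss:', result['loss'][-1])
print('learned parameters:', result['parameters'].ravel())   # should approach [3, 2]
```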