Polynomial Regression
Example 1
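This example fits a degree-5 polynomial to noisy synthetic data using TensorFlow 2.x eager execution. The model is

y(x) = \sum_{i=0}^{5} w_i x^i

and the six weights w_i are learned by minimizing the mean squared error with the Adam optimizer.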
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Generate synthetic training data from a degree-5 polynomial plus noise
trX = np.linspace(-1, 1, 101)
num_coeffs = 6
trY_coeffs = [1, 2, 3, 4, 5, 6]
trY = 0
for i in range(num_coeffs):
    trY += trY_coeffs[i] * np.power(trX, i)
trY += np.random.randn(*trX.shape) * 1.5  # Gaussian noise, std 1.5
plt.scatter(trX, trY)
plt.show()
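The targets generated above follow

y = 1 + 2x + 3x^2 + 4x^3 + 5x^4 + 6x^5 + \varepsilon, \quad \varepsilon \sim \mathcal{N}(0, 1.5^2)

so a degree-5 fit should be able to recover coefficients close to [1, 2, 3, 4, 5, 6].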
# Cast the training data to float32 to match the model weights
trX = tf.cast(trX, tf.float32)
trY = tf.cast(trY, tf.float32)

# y_model = model(X, w): evaluate the polynomial sum_i w[i] * x^i at X
def model(X, w):
    terms = []
    for i in range(num_coeffs):
        term = tf.multiply(w[i], tf.pow(X, i))
        terms.append(term)
    return tf.add_n(terms)
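As a quick sanity check of model() (a minimal sketch; w_test is an illustrative value, not part of the original example), the function is just a polynomial evaluated term by term:

# Hypothetical test: with all-ones weights, model(x) = 1 + x + ... + x^5,
# so model(0) = 1 and model(1) = 6
w_test = tf.ones(num_coeffs)
print(model(tf.constant([0.0, 1.0]), w_test).numpy())  # expected: [1. 6.]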
def loss(x, y, w):
    err = model(x, w) - y
    return tf.reduce_mean(tf.square(err))
def grad(x, y, w):
    with tf.GradientTape() as tape:
        loss_ = loss(x, y, w)
    return tape.gradient(loss_, [w])
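loss() is the mean squared error over whichever samples it receives,

L(w) = \frac{1}{N} \sum_{j=1}^{N} \Big( \sum_{i=0}^{5} w_i x_j^i - y_j \Big)^2,

and grad() records the forward pass with tf.GradientTape so that automatic differentiation can return the gradient of L with respect to the weight vector w.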
learning_rate = 0.01
training_epochs = 200
W = tf.Variable(np.random.randn(num_coeffs), dtype=tf.float32)  # random initial weights
loss_list_train = []
optimizer = tf.optimizers.Adam(learning_rate)
for i in range(training_epochs):
    # One Adam update per sample (stochastic gradient descent)
    for (x, y) in zip(trX, trY):
        grads = grad(x, y, W)
        optimizer.apply_gradients(zip(grads, [W]))
    # Record the loss over the full training set once per epoch
    loss_train = loss(trX, trY, W).numpy()
    loss_list_train.append(loss_train)
    if i % 20 == 0:
        print("Iteration {}, loss: {}".format(i, loss_train))
w_val = W.numpy()  # learned polynomial coefficients
print(w_val)
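If training has converged, the weights printed here should land reasonably close to the generating coefficients [1, 2, 3, 4, 5, 6]; the noise added to the targets keeps them from matching exactly.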
# Plot the fitted polynomial (red curve) over the noisy training data
trX_np = trX.numpy()
plt.scatter(trX_np, trY)
trY2 = 0
for i in range(num_coeffs):
    trY2 += w_val[i] * np.power(trX_np, i)
plt.plot(trX_np, trY2, 'r')
plt.show()
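Since loss_list_train records one loss value per epoch, a short follow-up plot (not part of the original example, but using only names defined above) makes it easy to confirm that training converged:

# Optional: visualize the training curve
plt.plot(loss_list_train)
plt.xlabel("epoch")
plt.ylabel("training loss (MSE)")
plt.show()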