2024.03.13 15:17
import matplotlib.pyplot as plt
import numpy as np

# Sample data
X = [0, 0.5, 1.0, 1.5, 2.0, 2.5]
Y = [0.3, 1.9, 2.4, 4.1, 6.8, 7.9]

x_mean = sum(X) / len(X)
y_mean = sum(Y) / len(Y)

# Least-squares slope: sum of cross deviations over sum of squared deviations
division = sum((y - y_mean) * (x - x_mean) for y, x in zip(Y, X))
divisor = sum((x - x_mean) ** 2 for x in X)

a = division / divisor
b = y_mean - a * x_mean

new_X = np.arange(0, 3, 0.05)
new_Y = a * new_X + b

print('a:', a, 'b:', b)

plt.plot(X, Y, 'ro', label='Sample Data')
plt.plot(new_X, new_Y, 'b-', label='Fitted Line')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()
The least squares method is used to compute the loss.
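To make that concrete, the squared-error loss of the fitted line can be computed directly from the closed-form estimates. The short sketch below assumes the same X, Y data as above and recomputes a and b with NumPy; the variable name mse is illustrative, not from the original snippet.

import numpy as np

X = np.array([0, 0.5, 1.0, 1.5, 2.0, 2.5])
Y = np.array([0.3, 1.9, 2.4, 4.1, 6.8, 7.9])

# Closed-form least-squares estimates (same formulas as the snippet above)
a = np.sum((X - X.mean()) * (Y - Y.mean())) / np.sum((X - X.mean()) ** 2)
b = Y.mean() - a * X.mean()

# Mean squared error: the loss that the least-squares fit minimizes
mse = np.mean((a * X + b - Y) ** 2)
print("MSE:", mse)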
import tensorflow as tf
import matplotlib.pyplot as plt

# Sample data
X = [0, 0.5, 1.0, 1.5, 2.0, 2.5]
Y = [0.3, 1.9, 2.4, 4.1, 6.8, 7.9]

# Model parameters, initialized randomly in [-1, 1]
W = tf.Variable(tf.random.uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random.uniform([1], -1.0, 1.0))

optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.1)

@tf.function()
def cost_eval():
    # Mean squared error between the hypothesis W*X + b and Y
    hypothesis = W * X + b
    cost = tf.reduce_mean(tf.square(hypothesis - Y))
    return cost

print("\n      W          b          cost")
for epoch in range(10):
    # One gradient descent step on W and b
    optimizer.minimize(cost_eval, var_list=[W, b])
    print(epoch, W.numpy(), b.numpy(), cost_eval().numpy())

print("\n=====test=====")
x = 5
print("X:", x, "Y:", (W * x + b).numpy())
x = 2.5
print("X:", x, "Y:", (W * x + b).numpy())

new_X = tf.range(0, 3, 0.05)
new_Y = W * new_X + b

plt.plot(X, Y, 'ro', label='Sample Data')
plt.plot(new_X, new_Y, 'b-')
plt.xlabel('X')
plt.ylabel('Y')
plt.legend()
plt.show()