Multi-Variable Linear Regression LAB
Multi-Variable Linear Regression
$H(x_1, x_2, x_3) = w_1 x_1 + w_2 x_2 + w_3 x_3 + b$
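Plugging the first training sample from the code below (x1 = 73, x2 = 80, x3 = 75, label 152) into the hypothesis gives
$H(73, 80, 75) = 73\,w_1 + 80\,w_2 + 75\,w_3 + b,$
and training should drive this value toward the label 152.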
Code
import tensorflow as tf

# data and label
x1 = [73., 93., 89., 96., 73.]
x2 = [80., 88., 91., 98., 66.]
x3 = [75., 93., 90., 100., 70.]
Y  = [152., 185., 180., 196., 142.]

# weights and bias
w1 = tf.Variable(tf.random.normal([1]))
w2 = tf.Variable(tf.random.normal([1]))
w3 = tf.Variable(tf.random.normal([1]))
b  = tf.Variable(tf.random.normal([1]))

learning_rate = 0.000001

for i in range(1001):
    # tf.GradientTape() to record the gradient of the cost function
    with tf.GradientTape() as tape:
        hypothesis = (w1 * x1) + (w2 * x2) + (w3 * x3) + b
        cost = tf.reduce_mean(tf.square(hypothesis - Y))

    # calculates the gradients of the cost
    w1_grad, w2_grad, w3_grad, b_grad = tape.gradient(cost, [w1, w2, w3, b])

    # update w1, w2, w3, and b
    # w1.assign_sub(learning_rate * w1_grad)
    # => w1 = w1 - learning_rate * w1_grad
    w1.assign_sub(learning_rate * w1_grad)
    w2.assign_sub(learning_rate * w2_grad)
    w3.assign_sub(learning_rate * w3_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 50 == 0:
        print(f'{i:5} | {cost.numpy():12.4f}')
Output
You can see that the cost starts as a very large value and gradually decreases as the parameters are updated. However, as the number of variables (features) grows, every weight has to be written out one by one.
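For instance, here is a minimal sketch (not part of the original lab) of what adding a hypothetical fourth feature x4 to the element-wise version would require; the x4 values are made up purely for illustration:

# hypothetical fourth feature: every step of the loop must be edited by hand
x4 = [10., 20., 30., 40., 50.]                   # made-up values, for illustration only
w4 = tf.Variable(tf.random.normal([1]))          # one more weight variable

with tf.GradientTape() as tape:
    hypothesis = (w1 * x1) + (w2 * x2) + (w3 * x3) + (w4 * x4) + b   # one more term
    cost = tf.reduce_mean(tf.square(hypothesis - Y))

# one more gradient to unpack and one more update line
w1_grad, w2_grad, w3_grad, w4_grad, b_grad = tape.gradient(cost, [w1, w2, w3, w4, b])
w4.assign_sub(learning_rate * w4_grad)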
Using Matrix
$H(X) = XW$
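Written out with the five training samples used below, X stacks one sample per row (5 × 3) and W holds one weight per feature (3 × 1), so $H(X) = XW$ has shape 5 × 1 (the bias b is added element-wise on top, and, as the code notes, can even be omitted):

$X = \begin{bmatrix} 73 & 80 & 75 \\ 93 & 88 & 93 \\ 89 & 91 & 90 \\ 96 & 98 & 100 \\ 73 & 66 & 70 \end{bmatrix},\qquad W = \begin{bmatrix} w_1 \\ w_2 \\ w_3 \end{bmatrix},\qquad XW = \begin{bmatrix} 73w_1 + 80w_2 + 75w_3 \\ \vdots \\ 73w_1 + 66w_2 + 70w_3 \end{bmatrix}$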
Code
import tensorflow as tf
import numpy as np

data = np.array([
    # x1,   x2,   x3,   y
    [ 73.,  80.,  75., 152.],
    [ 93.,  88.,  93., 185.],
    [ 89.,  91.,  90., 180.],
    [ 96.,  98., 100., 196.],
    [ 73.,  66.,  70., 142.]
], dtype=np.float32)

# slice data: all columns but the last are features, the last column is the label
X = data[:, :-1]
y = data[:, [-1]]

# Since X has 3 variables (columns), W must have 3 rows.
# Each individual instance produces a single output, so W must have 1 column.
# y (the output) is a single value, so its dimension is 1.
W = tf.Variable(tf.random.normal([3, 1]))
b = tf.Variable(tf.random.normal([1]))

learning_rate = 0.000001

# hypothesis, prediction function
# b can also be omitted later.
def predict(X):
    return tf.matmul(X, W) + b

n_epochs = 2000
for i in range(n_epochs + 1):
    # record the operations so the gradient of the cost can be computed
    with tf.GradientTape() as tape:
        cost = tf.reduce_mean(tf.square(predict(X) - y))

    # calculates the gradients of the cost
    W_grad, b_grad = tape.gradient(cost, [W, b])

    # update W and b
    # W.assign_sub(learning_rate * W_grad)
    # => W = W - learning_rate * W_grad
    W.assign_sub(learning_rate * W_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 100 == 0:
        print(f'{i:5} | {cost.numpy():10.4f}')
Output
With Matrix
With matrices, there is no need to handle each weight separately according to the number of variables; the same expression can be used regardless of how many features there are. It is also advantageous in terms of performance.
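As a quick illustration (a sketch, not part of the original post), the trained W and b can be reused through the same predict function on new, hypothetical samples, and adding more features only changes the shape of W:

# hypothetical new samples with the same three features; values are for illustration only
new_X = np.array([[90., 95., 92.],
                  [70., 65., 68.]], dtype=np.float32)
print(predict(new_X).numpy())        # one predicted score per row

# with, say, 5 features instead of 3, only the variable's shape changes:
# W = tf.Variable(tf.random.normal([5, 1]))
# predict() itself stays exactly the same.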
Reference
For more about this topic (Multi-Variable Linear Regression LAB), see:
https://velog.io/@quasarhub/Multi-Variable-Linear-Regression-LAB