DeepLearning_L02
Theory
If we define the hypothesis H(x) as below,
Goal: find the line whose distance to the given data points is smallest.
What if the following cost function reaches its minimum? -> Then the goal is achieved.
How to minimize it is explained in the next lecture (for reference, as noted in the implementation below, the gradient descent algorithm is used).
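In symbols, the hypothesis and cost function look like this (they match the code in the practice section below; m is the number of training examples):

$$H(x) = Wx + b$$

$$cost(W, b) = \frac{1}{m}\sum_{i=1}^{m}\left(H(x_i) - y_i\right)^2$$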
Practice
Basic imports
# Lab 2 Linear Regression
import tensorflow as tf
tf.set_random_seed(777) # for reproducibility
I don't know what this is yet; ignoring it for now. (It fixes TensorFlow's graph-level random seed so that the random initial values are the same on every run.)
# X and Y data
x_train = [1, 2, 3]
y_train = [1, 2, 3]
Simple x and y values are given;
let's find the W and b (weight, bias) that fit this x, y data.
# Try to find values for W and b to compute y_data = x_data * W + b
# We know that W should be 1 and b should be 0
# But let TensorFlow figure it out
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
tf.Variable
Here, variable means a variable that TensorFlow itself uses: a value TensorFlow updates as it runs. (Also called a trainable variable.)
W = tf.Variable(tf.random_normal([1]), name="weight")
Interpretation: declare a tensor W as a Variable (a value TensorFlow can modify), initialized from random_normal with shape [1].
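A minimal sketch (TF 1.x; W_demo is a hypothetical name so it doesn't clash with the W above) to see what this line actually creates:

import tensorflow as tf

# A shape-[1] Variable initialized from a standard normal distribution.
W_demo = tf.Variable(tf.random_normal([1]), name="weight_demo")

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(W_demo.shape)      # (1,) -- a one-element tensor
    print(sess.run(W_demo))  # a single random initial value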
# Our hypothesis XW+b
hypothesis = x_train * W + b
The hypothesis can be defined as above,
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
and the cost function can be defined as above.
tf.reduce_mean
computes the mean of a tensor's elements.
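A quick standalone check of tf.reduce_mean (TF 1.x):

import tensorflow as tf

t = tf.constant([1., 2., 3., 4.])
with tf.Session() as sess:
    # With no axis argument, reduce_mean averages every element.
    print(sess.run(tf.reduce_mean(t)))  # 2.5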
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
GradientDescentOptimizer
is used to minimize the cost (loss). But how does minimize actually work? -> For now, think of it as magic that TensorFlow handles.
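For the curious, a rough sketch of what minimize(cost) does internally, written with the equivalent two-step TF 1.x API (this assumes the cost, W, and b defined above):

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# 1) Differentiate cost with respect to every trainable variable (here W and b).
grads_and_vars = optimizer.compute_gradients(cost)
# 2) Apply one descent step to each: var <- var - learning_rate * gradient.
train = optimizer.apply_gradients(grads_and_vars)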
# Launch the graph in a session.
with tf.Session() as sess:
    # Initializes global variables in the graph.
    sess.run(tf.global_variables_initializer())
After creating a session with
with tf.Session() as sess:
, in order to use the TensorFlow variables W and b,
sess.run(tf.global_variables_initializer())
must be called.
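If the initializer call is skipped, evaluating a Variable fails; a sketch (using the W and b above):

with tf.Session() as sess:
    # sess.run(W)  # would raise FailedPreconditionError (uninitialized value weight)
    sess.run(tf.global_variables_initializer())
    print(sess.run([W, b]))  # now both hold their random initial values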
# Fit the line
for step in range(2001):
    _, cost_val, W_val, b_val = sess.run([train, cost, W, b])
    # 2001 iterations, printing once every 20 steps
    if step % 20 == 0:
        print(step, cost_val, W_val, b_val)
The point I was most curious about is how
_, cost_val, W_val, b_val = sess.run([train, cost, W, b])
actually works.
sess.run
runs the session, and
([train, cost, W, b])
asks it to evaluate each of those nodes.
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
We want to minimize the cost function, and since W and b were declared as the variables TensorFlow is allowed to modify, it runs by adjusting W and b.
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
W and b are declared as variables.
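A small check of which variables the optimizer is allowed to touch (assumes the graph built above); minimize(cost) updates exactly the collection returned here:

# Everything created with tf.Variable is trainable by default,
# so GradientDescentOptimizer.minimize(cost) will update weight and bias.
print(tf.trainable_variables())  # [weight, bias]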
Full code
# Lab 2 Linear Regression
import tensorflow as tf
tf.set_random_seed(777) # for reproducibility
# X and Y data
x_train = [1, 2, 3]
y_train = [1, 2, 3]
# Try to find values for W and b to compute y_data = x_data * W + b
# We know that W should be 1 and b should be 0
# But let TensorFlow figure it out
W = tf.Variable(tf.random_normal([1]), name="weight")
b = tf.Variable(tf.random_normal([1]), name="bias")
# Our hypothesis XW+b
hypothesis = x_train * W + b
# cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - y_train))
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)
# Launch the graph in a session.
with tf.Session() as sess:
    # Initializes global variables in the graph.
    sess.run(tf.global_variables_initializer())

    # Fit the line
    for step in range(2001):
        _, cost_val, W_val, b_val = sess.run([train, cost, W, b])
        if step % 20 == 0:
            print(step, cost_val, W_val, b_val)
# Learns best fit W:[ 1.], b:[ 0.]
"""
0 2.82329 [ 2.12867713] [-0.85235667]
20 0.190351 [ 1.53392804] [-1.05059612]
40 0.151357 [ 1.45725465] [-1.02391243]
...
1960 1.46397e-05 [ 1.004444] [-0.01010205]
1980 1.32962e-05 [ 1.00423515] [-0.00962736]
2000 1.20761e-05 [ 1.00403607] [-0.00917497]
"""