Function Reference Table - LOPES-HUFS/DeepLearningFromForR GitHub Wiki

Python functions

Functions used in the book and referenced in this project

import numpy as np 

cross_entropy_error()

Input data

y holds the predicted probabilities for a batch of two samples, and t holds the matching one-hot labels (the correct class is index 2 in both rows):

y = np.array([[0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0, 0.1, 0, 0], [0.1, 0.05, 0.1, 0, 0.05, 0.1, 0, 0.6, 0, 0]])

t = np.array([[0, 0, 1, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]])

Function definition

def cross_entropy_error(y, t):
    # a single 1-D sample is reshaped into a batch of one
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    batch_size = y.shape[0]
    # 1e-7 keeps np.log away from log(0); dividing by batch_size gives the mean loss
    return -np.sum(t * np.log(y + 1e-7)) / batch_size

Output

>>> cross_entropy_error(y, t)
1.406704775046942
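
Since t marks index 2 as the correct class in both rows, this batch loss is just the mean of -log of the probability assigned to that class (0.6 in the first row, 0.1 in the second). A quick sanity check by hand reproduces the value above:

>>> -(np.log(0.6 + 1e-7) + np.log(0.1 + 1e-7)) / 2
1.406704775046942

A single 1-D sample takes the reshape branch and is treated as a batch of one (y1 and t1 here are just throwaway names for the first rows of the arrays above):

>>> y1 = np.array([0.1, 0.05, 0.6, 0.0, 0.05, 0.1, 0, 0.1, 0, 0])
>>> t1 = np.array([0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
>>> cross_entropy_error(y1, t1)  # equals -np.log(0.6 + 1e-7), roughly 0.5108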

softmax()

Input data

x = np.array([[0.3, 2.9, 4.0], [0.3, 2.9, 4.0]])

Function definition
def softmax(x):
    # 2-D input: compute a softmax over each row (each sample)
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)  # subtract the max to guard against overflow in np.exp
    return np.exp(x) / np.sum(np.exp(x))

Output

>>> softmax(x)
array([[0.01821127, 0.24519181, 0.73659691],
       [0.01821127, 0.24519181, 0.73659691]])
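
Subtracting the maximum before np.exp leaves the result unchanged, since exp(x - c) / sum(exp(x - c)) = exp(x) / sum(exp(x)) for any constant c; it only prevents overflow when the inputs are large. Each row of the output above is a probability distribution summing to 1. As a minimal usage sketch, a 1-D input takes the second branch and returns a single distribution:

>>> softmax(np.array([0.3, 2.9, 4.0]))
array([0.01821127, 0.24519181, 0.73659691])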