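# Setup (assumed, not shown in the original listing): imports, device selection, and
# the MNIST DataLoader that the training loop below iterates over. The root path and
# loader options are assumptions; batch_size=100 matches the parameter below.
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms

device = 'cuda' if torch.cuda.is_available() else 'cpu'

mnist_train = dsets.MNIST(root='MNIST_data/', train=True,
                          transform=transforms.ToTensor(), download=True)
data_loader = torch.utils.data.DataLoader(dataset=mnist_train, batch_size=100,
                                          shuffle=True, drop_last=True)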
# parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# 3 layers!
linear1 = torch.nn.Linear(784, 256, bias=True).to(device)  # takes 784 (28 * 28) inputs, outputs 256
linear2 = torch.nn.Linear(256, 256, bias=True).to(device)  # takes 256 inputs, outputs 256
linear3 = torch.nn.Linear(256, 10, bias=True).to(device)   # takes 256 inputs, outputs 10 (digits 0-9)
relu = torch.nn.ReLU()
# weight initialization
torch.nn.init.normal_(linear1.weight)
torch.nn.init.normal_(linear2.weight)
torch.nn.init.normal_(linear3.weight)
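# Note: normal_ draws weights from N(0, 1), which is quite large for 784- and 256-wide
# layers; a scaled scheme such as torch.nn.init.xavier_uniform_(linear1.weight) is a
# common alternative (not used in this listing).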
# model
# No activation after the last layer: CrossEntropyLoss() below applies the softmax
# internally when it computes the loss.
model = torch.nn.Sequential(linear1, relu, linear2, relu, linear3).to(device)
criterion = torch.nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
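# Quick check of the claim above: CrossEntropyLoss is log_softmax followed by NLLLoss,
# so the model can output raw logits. The tensors here are illustrative, not part of
# the original listing.
_logits = torch.randn(4, 10)
_labels = torch.tensor([1, 0, 7, 9])
_ce = torch.nn.CrossEntropyLoss()(_logits, _labels)
_nll = torch.nn.NLLLoss()(torch.nn.functional.log_softmax(_logits, dim=1), _labels)
print(torch.allclose(_ce, _nll))  # True: the two losses agree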
# training
total_batch = len(data_loader)
for epoch in range(training_epochs):
    avg_cost = 0
    for X, Y in data_loader:
        X = X.view(-1, 28 * 28).to(device)  # flatten each 28x28 image into a 784-vector
        Y = Y.to(device)
        hypothesis = model(X)
        cost = criterion(hypothesis, Y)
        optimizer.zero_grad()
        cost.backward()
        optimizer.step()  # apply the gradient update
        avg_cost += cost / total_batch
    print("Epoch:", "%04d" % (epoch + 1), "cost =", "{:.9f}".format(avg_cost))
# Accuracy: 94%
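# Evaluation (sketch): the 94% accuracy above presumably comes from a test pass like
# this one; `mnist_test` and the 255.0 rescaling (to match ToTensor's [0, 1] range)
# are assumptions, not part of the original listing.
mnist_test = dsets.MNIST(root='MNIST_data/', train=False,
                         transform=transforms.ToTensor(), download=True)
with torch.no_grad():
    X_test = mnist_test.data.view(-1, 28 * 28).float().to(device) / 255.0
    Y_test = mnist_test.targets.to(device)
    prediction = model(X_test)
    correct = torch.argmax(prediction, dim=1) == Y_test
    print('Accuracy:', correct.float().mean().item())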