Basic concepts
- scalar : plots a series of scalar values (metrics, losses, etc.) over epochs/steps
- graph : displays the model's computational graph (see the sketch after this list)
- histogram : shows the distribution of values such as weights
- image & text : compares predictions against the ground-truth values
- mesh : a tool for visualizing data in 3D form
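Of these, graph and mesh are not demonstrated in the examples below, so here is a minimal add_graph sketch. The toy nn.Sequential model and the 'logs' directory are assumptions for illustration only; mesh data can be logged analogously with writer.add_mesh.
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

# toy model and dummy input, only to have something to trace (assumption)
model = nn.Sequential(nn.Flatten(), nn.Linear(28*28, 10))
dummy_input = torch.rand(1, 1, 28, 28)

writer = SummaryWriter('logs')
writer.add_graph(model, dummy_input)  # trace the model and log its computational graph
writer.close()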
Examples
1. scalar
import os
logs_base_dir = 'logs'
os.makedirs(logs_base_dir, exist_ok=True)
from torch.utils.tensorboard import SummaryWriter
import numpy as np
writer = SummaryWriter(logs_base_dir)
for n_iter in range(100):
  writer.add_scalar("Loss/train", np.random.random(), n_iter)
  writer.add_scalar("Loss/test", np.random.random(), n_iter)
  writer.add_scalar("Accuracy/train", np.random.random(), n_iter)
  writer.add_scalar("Accuracy/test", np.random.random(), n_iter)
writer.flush()
%load_ext tensorboard # jupyter notebook
%tensorboard --logdir {logs_base_dir} # jupyter notebook
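In the code above, the train and test losses end up in separate charts because they use separate tags. If you want both curves on a single chart, add_scalars (plural) groups them under one main tag. A minimal sketch reusing logs_base_dir from above (the tag names are illustrative):
from torch.utils.tensorboard import SummaryWriter
import numpy as np

writer = SummaryWriter(logs_base_dir)
for n_iter in range(100):
  # one "Loss" chart with two lines, train and test
  writer.add_scalars("Loss", {"train": np.random.random(),
                              "test": np.random.random()}, n_iter)
writer.flush()
Outside Jupyter, the same logs can be viewed by running tensorboard --logdir logs in a terminal.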
2. Histogram
from torch.utils.tensorboard import SummaryWriter
import numpy as np
# mostly used to monitor how values such as weights are distributed
writer = SummaryWriter(logs_base_dir)
for i in range(10): # 10 bundles of 1,000 data points
  x = np.random.random(1000)
  writer.add_histogram('distribution centers', i+x, i)
writer.close()
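In practice the histogram panel is usually fed with a model's parameters once per epoch rather than with random numbers. A minimal sketch, assuming a stand-in nn.Linear model (any nn.Module works the same way):
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

model = nn.Linear(10, 2)  # stand-in model (assumption)
writer = SummaryWriter(logs_base_dir)
for epoch in range(5):
  for name, param in model.named_parameters():
    # one histogram per parameter tensor (weight, bias), indexed by epoch
    writer.add_histogram(name, param.detach(), epoch)
writer.close()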
3. Images
from torch.utils.tensorboard import SummaryWriter
import numpy as np
img_batch = np.zeros((16, 3, 100, 100))
for i in range(16):
  img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100)/10000/16*i
  img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100)/10000)/16*i
writer = SummaryWriter(logs_base_dir)
writer.add_images('my_image_batch', img_batch, 0)
writer.flush()
writer.close()
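add_images expects a whole (N, C, H, W) batch, while add_image (singular) logs one (C, H, W) image; torchvision.utils.make_grid is a convenient way to collapse a batch into a single grid image first. A minimal sketch reusing img_batch from above:
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter

grid = torchvision.utils.make_grid(torch.as_tensor(img_batch, dtype=torch.float32))
writer = SummaryWriter(logs_base_dir)
writer.add_image('my_image_grid', grid, 0)  # a single (C, H, W) image
writer.close()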
4. Application
Creating the data loaders
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
# transforms
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, ),(0.5,))
])
# datasets
trainset = torchvision.datasets.FashionMNIST('./data',
                                             download=True,
                                             train=True,
                                             transform=transform)
testset = torchvision.datasets.FashionMNIST('./data',
                                            download=True,
                                            train=False,
                                            transform=transform)
#dataloader
trainloader = torch.utils.data.DataLoader(trainset,
                                         batch_size=4,
                                         shuffle=True, 
                                         num_workers=2)
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=4,
                                         shuffle=False,
                                         num_workers=2)
# constant for classes
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle Boot')
Model definition
class Net(nn.Module):
  def __init__(self):
    super(Net, self).__init__()
    self.conv1 = nn.Conv2d(1, 6, 5)
    self.pool = nn.MaxPool2d(2, 2)
    self.conv2 = nn.Conv2d(6, 16, 5)
    self.fc1 = nn.Linear(16*4*4, 120)
    self.fc2 = nn.Linear(120, 84)
    self.fc3 = nn.Linear(84, 10) # 10 classes
  def forward(self, x):
    x = self.pool(F.relu(self.conv1(x)))
    x = self.pool(F.relu(self.conv2(x)))
    x = x.view(-1, 16*4*4)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = self.fc3(x)
    return x
  
net = Net()
Helper function definitions
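plot_classes_preds below calls matplotlib_imshow, which is never defined in this post. A sketch of that helper, following the version in the official PyTorch TensorBoard tutorial (it undoes the Normalize((0.5,), (0.5,)) transform applied above and relies on the matplotlib/numpy imports from the data-loader section):
def matplotlib_imshow(img, one_channel=False):
  """Display a (C, H, W) tensor, undoing the (0.5, 0.5) normalization."""
  if one_channel:
    img = img.mean(dim=0)
  img = img / 2 + 0.5  # unnormalize
  npimg = img.numpy()
  if one_channel:
    plt.imshow(npimg, cmap="Greys")
  else:
    plt.imshow(np.transpose(npimg, (1, 2, 0)))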
def images_to_probs(net, images):
  """
  Compute the predicted class and its softmax probability for each image
  """
  output = net(images)
  _, preds_tensor = torch.max(output, 1)
  preds = np.squeeze(preds_tensor.numpy())
  return preds, [F.softmax(el, dim=0)[i].item() for i, el in zip(preds, output)]
def plot_classes_preds(net, images, labels):
  preds, probs = images_to_probs(net, images)
  fig = plt.figure(figsize=(12, 48))
  for idx in np.arange(4):
    ax = fig.add_subplot(1, 4, idx+1, xticks=[], yticks=[])
    matplotlib_imshow(images[idx], one_channel=True)
    ax.set_title(f"{classes[preds[idx]]}, {probs[idx]*100:.1f}%\n(label: {classes[labels[idx]]})",
                 color=("green" if preds[idx]==labels[idx].item() else "red"))
  return fig
Training
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
writer = SummaryWriter(logs_base_dir)  # re-open a writer; the one above was closed
running_loss = 0.0
for epoch in range(10):
  print(epoch)
  for i, data in enumerate(trainloader, 0):
    inputs, labels = data
    optimizer.zero_grad()
    outputs = net(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    running_loss += loss.item()
    
    if i % 1000 == 999:
      writer.add_scalar("training loss", 
                        running_loss/1000,
                        epoch*len(trainloader)+i)
      writer.add_figure("predictions vs actuals",
                        plot_classes_preds(net, inputs, labels),
                        global_step=epoch*len(trainloader)+i)
      running_loss = 0.0
Prediction
class_probs = []
class_label = []
with torch.no_grad():
  for data in testloader:
    images, labels = data
    output = net(images)
    class_probs_batch = [F.softmax(el, dim=0) for el in output]
    class_probs.append(class_probs_batch)
    class_label.append(labels)
test_probs = torch.cat([torch.stack(batch) for batch in class_probs])
test_label = torch.cat(class_label)
# helper function
def add_pr_curve_tensorboard(class_index, test_probs, test_label, global_step=0):
  tensorboard_truth = test_label == class_index
  tensorboard_probs = test_probs[:, class_index]
  writer.add_pr_curve(classes[class_index],
                      tensorboard_truth,
                      tensorboard_probs,
                      global_step=global_step)
  writer.close()
# plot the PR curve for every class
for i in range(len(classes)):
  add_pr_curve_tensorboard(i, test_probs, test_label)