Sigmoid (0 ~ 1)
import torch
import matplotlib.pyplot as plt

x = torch.linspace(-10, 10, 100)  # 100 evenly spaced points in [-10, 10]
y = torch.sigmoid(x)
print(x)
print(y)

plt.plot(x, y)  # matplotlib is imported for this plot of the S-shaped curve
plt.show()
tensor([-10.0000, -9.7980, -9.5960, -9.3939, -9.1919, -8.9899, -8.7879,
-8.5859, -8.3838, -8.1818, -7.9798, -7.7778, -7.5758, -7.3737,
-7.1717, -6.9697, -6.7677, -6.5657, -6.3636, -6.1616, -5.9596,
-5.7576, -5.5556, -5.3535, -5.1515, -4.9495, -4.7475, -4.5455,
-4.3434, -4.1414, -3.9394, -3.7374, -3.5354, -3.3333, -3.1313,
-2.9293, -2.7273, -2.5253, -2.3232, -2.1212, -1.9192, -1.7172,
-1.5152, -1.3131, -1.1111, -0.9091, -0.7071, -0.5051, -0.3030,
-0.1010, 0.1010, 0.3030, 0.5051, 0.7071, 0.9091, 1.1111,
1.3131, 1.5152, 1.7172, 1.9192, 2.1212, 2.3232, 2.5253,
2.7273, 2.9293, 3.1313, 3.3333, 3.5354, 3.7374, 3.9394,
4.1414, 4.3434, 4.5455, 4.7475, 4.9495, 5.1515, 5.3535,
5.5556, 5.7576, 5.9596, 6.1616, 6.3636, 6.5657, 6.7677,
6.9697, 7.1717, 7.3737, 7.5758, 7.7778, 7.9798, 8.1818,
8.3838, 8.5859, 8.7879, 8.9899, 9.1919, 9.3939, 9.5960,
9.7980, 10.0000])
tensor([4.5398e-05, 5.5561e-05, 6.7998e-05, 8.3220e-05, 1.0185e-04, 1.2465e-04,
1.5255e-04, 1.8669e-04, 2.2848e-04, 2.7961e-04, 3.4219e-04, 4.1877e-04,
5.1247e-04, 6.2713e-04, 7.6741e-04, 9.3905e-04, 1.1490e-03, 1.4059e-03,
1.7201e-03, 2.1044e-03, 2.5743e-03, 3.1488e-03, 3.8510e-03, 4.7091e-03,
5.7573e-03, 7.0371e-03, 8.5990e-03, 1.0504e-02, 1.2825e-02, 1.5651e-02,
1.9089e-02, 2.3263e-02, 2.8323e-02, 3.4445e-02, 4.1834e-02, 5.0724e-02,
6.1383e-02, 7.4107e-02, 8.9217e-02, 1.0705e-01, 1.2795e-01, 1.5224e-01,
1.8018e-01, 2.1196e-01, 2.4766e-01, 2.8719e-01, 3.3025e-01, 3.7635e-01,
4.2482e-01, 4.7477e-01, 5.2523e-01, 5.7518e-01, 6.2365e-01, 6.6975e-01,
7.1281e-01, 7.5234e-01, 7.8804e-01, 8.1982e-01, 8.4776e-01, 8.7205e-01,
8.9295e-01, 9.1078e-01, 9.2589e-01, 9.3862e-01, 9.4928e-01, 9.5817e-01,
9.6555e-01, 9.7168e-01, 9.7674e-01, 9.8091e-01, 9.8435e-01, 9.8717e-01,
9.8950e-01, 9.9140e-01, 9.9296e-01, 9.9424e-01, 9.9529e-01, 9.9615e-01,
9.9685e-01, 9.9743e-01, 9.9790e-01, 9.9828e-01, 9.9859e-01, 9.9885e-01,
9.9906e-01, 9.9923e-01, 9.9937e-01, 9.9949e-01, 9.9958e-01, 9.9966e-01,
9.9972e-01, 9.9977e-01, 9.9981e-01, 9.9985e-01, 9.9988e-01, 9.9990e-01,
9.9992e-01, 9.9993e-01, 9.9994e-01, 9.9995e-01])
import torch
from torch import nn
m = nn.Sigmoid()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([ 0.3165, -0.9407]) tensor([0.5785, 0.2808])
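Under the hood, sigmoid is σ(x) = 1 / (1 + e^(−x)), which squashes any real input into (0, 1). A minimal illustrative check of the formula against the built-in:

import torch

x = torch.randn(4)
manual = 1 / (1 + torch.exp(-x))  # sigmoid written out by hand
print(torch.allclose(manual, torch.sigmoid(x)))  # True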
Tanh (-1 ~ 1)
import torch
from torch import nn

m = nn.Tanh()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([0.7030, 0.8120]) tensor([0.6063, 0.6707])
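nn.Tanh computes tanh(x) = (e^x − e^(−x)) / (e^x + e^(−x)), mapping inputs to (−1, 1). A quick hand-rolled check (illustrative sketch):

import torch

x = torch.randn(4)
manual = (torch.exp(x) - torch.exp(-x)) / (torch.exp(x) + torch.exp(-x))
print(torch.allclose(manual, torch.tanh(x)))  # True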
ReLU (0 ~)
import torch
from torch import nn

m = nn.ReLU()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([ 1.5399, -0.2950]) tensor([1.5399, 0.0000])
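ReLU is simply max(0, x): negative inputs are clamped to zero, which is why −0.2950 above becomes 0.0000. The same result can be had with torch.clamp (sketch):

import torch

x = torch.randn(4)
print(torch.clamp(x, min=0))  # equivalent to ReLU
print(torch.relu(x))          # built-in, same values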
LeakyReLU
import torch
from torch import nn

m = nn.LeakyReLU(0.1)
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([-0.2309, -0.1199]) tensor([-0.0231, -0.0120])
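LeakyReLU returns x for x ≥ 0 and negative_slope · x otherwise; with the slope of 0.1 used above, −0.2309 becomes −0.0231. An equivalent torch.where formulation (illustrative sketch):

import torch

x = torch.randn(4)
slope = 0.1
manual = torch.where(x >= 0, x, slope * x)  # leaky ReLU by hand
print(torch.allclose(manual, torch.nn.functional.leaky_relu(x, slope)))  # True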
PReLU
import torch
from torch import nn

m = nn.PReLU()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([ 0.2162, -0.2647]) tensor([ 0.2162, -0.0662], grad_fn=<PreluKernelBackward0>)
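PReLU is LeakyReLU whose negative slope is a learnable parameter (initialized to 0.25 by default), which is why the output above carries a grad_fn. The slope is exposed as m.weight:

import torch
from torch import nn

m = nn.PReLU()   # one shared slope parameter, initial value 0.25
print(m.weight)  # tensor([0.2500], requires_grad=True)
# -0.2647 * 0.25 ≈ -0.0662, matching the output above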
ELU
import torch
from torch import nn

m = nn.ELU()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([1.1404, 0.1660]) tensor([1.1404, 0.1660])
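ELU(x) = x for x > 0 and α · (e^x − 1) otherwise (α = 1.0 by default). Both sampled inputs above happened to be positive, so they pass through unchanged; a negative input exercises the exponential branch (sketch):

import torch
from torch import nn

m = nn.ELU()  # alpha = 1.0 by default
x = torch.tensor([-1.0, 1.0])
print(m(x))   # tensor([-0.6321,  1.0000]); e^(-1) - 1 ≈ -0.6321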
CELU
import torch
from torch import nn

m = nn.CELU()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([-0.4080, 0.3577]) tensor([-0.3350, 0.3577])
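CELU(x) = max(0, x) + min(0, α · (e^(x/α) − 1)). Unlike ELU it stays continuously differentiable for any α, and with the default α = 1 the two coincide: e^(−0.4080) − 1 ≈ −0.3350, matching the output above. A minimal comparison (sketch):

import torch
from torch import nn

x = torch.tensor([-0.4080, 0.3577])
print(nn.CELU()(x))  # tensor([-0.3350,  0.3577])
print(nn.ELU()(x))   # identical when alpha = 1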
Swish
import torch
import torch.nn as nn

class Swish(nn.Module):
    def forward(self, x):
        return x * torch.sigmoid(x)

m = Swish()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([-0.7828, 0.4667]) tensor([-0.2456, 0.2868])
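Swish(x) = x · σ(x). Recent PyTorch versions ship this built in as nn.SiLU (SiLU and Swish are the same function for β = 1), so the custom module above is mainly needed on older versions. An equivalence check, assuming a PyTorch version that provides nn.SiLU:

import torch
from torch import nn

x = torch.randn(4)
custom = x * torch.sigmoid(x)  # Swish by hand
print(torch.allclose(custom, nn.SiLU()(x)))  # True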
Mish
import torch
import torch.nn as nn

class Mish(nn.Module):
    def forward(self, x):
        return x * torch.tanh(torch.nn.functional.softplus(x))

m = Mish()
input = torch.randn(2)
output = m(input)
print(input, output)
tensor([1.0841, 0.0139]) tensor([0.9540, 0.0084])
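Mish(x) = x · tanh(softplus(x)), where softplus(x) = ln(1 + e^x) is a smooth approximation of ReLU. PyTorch 1.9+ also provides this directly as nn.Mish, so the custom class is only needed on older versions (equivalence sketch):

import torch
from torch import nn

x = torch.randn(4)
custom = x * torch.tanh(torch.nn.functional.softplus(x))
print(torch.allclose(custom, nn.Mish()(x)))  # True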
nn.Softmax (Probability)
import torch
from torch import nn
import numpy as np

m = nn.Softmax(dim=1)
input = torch.randn(2, 3)
output = m(input)
print(input)
print(output)
print(output.sum(dim=1))          # each row sums to 1
print(np.argmax(output, axis=1))  # index of the maximum value in each row
tensor([[-0.1117, -0.6433, 0.3252],
[ 0.1069, 0.8204, -1.1930]])
tensor([[0.3189, 0.1874, 0.4936],
[0.3018, 0.6160, 0.0823]])
tensor([1., 1.])
tensor([2, 1])
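Softmax with dim=1 exponentiates and normalizes along each row, so every row sums to 1 and can be read as a probability distribution over classes; argmax then picks the most likely class per row. torch.argmax is the more idiomatic choice than np.argmax here, and for training with a cross-entropy-style loss the log-domain nn.LogSoftmax is numerically more stable (sketch):

import torch
from torch import nn

logits = torch.randn(2, 3)
probs = nn.Softmax(dim=1)(logits)
print(torch.argmax(probs, dim=1))    # predicted class per row
print(nn.LogSoftmax(dim=1)(logits))  # log-probabilities, numerically stable
print(torch.log(probs))              # same values via a less stable route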
Reference: https://pytorch.org/docs/stable/nn.html#non-linear-activations-weighted-sum-nonlinearity