yarburart / keai / Commits

Commit ae5291ef, authored Mar 15, 2023 by YarBurArt (Owner)
clear libs
Parent: e8642a1f
Changes: 1 file

pytorch_nn.py @ ae5291ef
 import torch
 from torch.autograd import Variable
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.optim as optim
-from torchvision import datasets, transforms
+from torch.nn import Module, Linear, NLLLoss
+from torch.nn.functional import relu, log_softmax
+from torch.optim import SGD
+from torchvision.datasets import MNIST
+from torchvision.transforms import Compose, ToTensor, Normalize
 from torch.utils.data import DataLoader


 def simple_gradient():
     # print the gradient of 2x^2 + 5x
     x = Variable(torch.ones(2, 2) * 2, requires_grad=True)
...
...
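An aside, not part of the commit: the gradient promised by simple_gradient's comment can be checked by hand. For z = 2x^2 + 5x, dz/dx = 4x + 5, so with every element of x equal to 2 the gradient should be 13 everywhere. A minimal sketch in current PyTorch, where Variable is no longer needed:

import torch

def simple_gradient_sketch():
    # every element of x is 2.0; autograd tracks operations on it
    x = torch.full((2, 2), 2.0, requires_grad=True)
    z = 2 * x ** 2 + 5 * x        # elementwise 2x^2 + 5x
    z.sum().backward()            # dz/dx = 4x + 5
    print(x.grad)                 # prints a 2x2 tensor of 13s

simple_gradient_sketch()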
@@ -19,39 +21,39 @@ def create_nn(batch_size=200, learning_rate=0.01, epochs=10,
                    log_interval=10):
     train_loader = DataLoader(
-        datasets.MNIST('../data', train=True, download=True,
-                       transform=transforms.Compose([
-                           transforms.ToTensor(),
-                           transforms.Normalize((0.1307,), (0.3081,))])),
+        MNIST('../data', train=True, download=True,
+              transform=Compose([
+                  ToTensor(),
+                  Normalize((0.1307,), (0.3081,))])),
         batch_size=batch_size, shuffle=True)
     test_loader = DataLoader(
-        datasets.MNIST('../data', train=False,
-                       transform=transforms.Compose([
-                           transforms.ToTensor(),
-                           transforms.Normalize((0.1307,), (0.3081,))])),
+        MNIST('../data', train=False,
+              transform=Compose([
+                  ToTensor(),
+                  Normalize((0.1307,), (0.3081,))])),
         batch_size=batch_size, shuffle=True)
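The constants (0.1307,) and (0.3081,) are the mean and standard deviation of the MNIST training images, so Normalize standardizes each pixel. A quick sketch to verify them, assuming torchvision is installed and '../data' is writable:

from torchvision.datasets import MNIST

train = MNIST('../data', train=True, download=True)
pixels = train.data.float() / 255.0               # raw uint8 images scaled to [0, 1]
print(pixels.mean().item(), pixels.std().item())  # ~0.1307, ~0.3081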
-    class Net(nn.Module):
+    class Net(Module):
         def __init__(self):
             super(Net, self).__init__()
-            self.fc1 = nn.Linear(28 * 28, 200)
-            self.fc2 = nn.Linear(200, 200)
-            self.fc3 = nn.Linear(200, 10)
+            self.fc1 = Linear(28 * 28, 200)
+            self.fc2 = Linear(200, 200)
+            self.fc3 = Linear(200, 10)
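The stack fc1 -> fc2 -> fc3 maps 784 -> 200 -> 200 -> 10, so fc1 expects images flattened to 784 features; in the elided training loop the batches presumably get reshaped before the forward pass. A shape check under that assumption:

import torch
from torch.nn import Linear

fc1 = Linear(28 * 28, 200)
batch = torch.randn(200, 1, 28, 28)   # shape the MNIST DataLoader yields
flat = batch.view(-1, 28 * 28)        # flatten to (200, 784) for fc1
print(fc1(flat).shape)                # torch.Size([200, 200])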
         def forward(self, x):
-            x = F.relu(self.fc1(x))
-            x = F.relu(self.fc2(x))
+            x = relu(self.fc1(x))
+            x = relu(self.fc2(x))
             x = self.fc3(x)
-            return F.log_softmax(x)
+            return log_softmax(x)
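One caveat with the new bare log_softmax call: it still omits the dim argument, which PyTorch 0.4+ flags with a deprecation warning because the reduction dimension is then guessed. A sketch of the explicit form, my note rather than part of the commit:

import torch
from torch.nn.functional import log_softmax

logits = torch.randn(4, 10)          # a batch of 4 samples, 10 class scores
out = log_softmax(logits, dim=1)     # normalize over the class dimension
print(out.exp().sum(dim=1))          # each row sums to 1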
     net = Net()
     print(net)
     # create a stochastic gradient descent optimizer
-    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
+    optimizer = SGD(net.parameters(), lr=learning_rate, momentum=0.9)
     # create a loss function
-    criterion = nn.NLLLoss()
+    criterion = NLLLoss()
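SGD with momentum plus NLLLoss is the classic pairing for a network that ends in log_softmax, since NLLLoss expects log-probabilities. A self-contained sketch of one update step under that pairing; the names here are stand-ins, not the script's own:

import torch
from torch.nn import Linear, NLLLoss
from torch.nn.functional import log_softmax
from torch.optim import SGD

model = Linear(28 * 28, 10)                       # stand-in for Net
optimizer = SGD(model.parameters(), lr=0.01, momentum=0.9)
criterion = NLLLoss()

data = torch.randn(8, 28 * 28)                    # hypothetical batch
target = torch.randint(0, 10, (8,))
optimizer.zero_grad()                             # clear old gradients
loss = criterion(log_softmax(model(data), dim=1), target)
loss.backward()                                   # backpropagate
optimizer.step()                                  # apply the SGD update
print(loss.item())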
     # run the main training loop
     for epoch in range(epochs):
...
...
@@ -67,7 +69,7 @@ def create_nn(batch_size=200, learning_rate=0.01, epochs=10,
             if batch_idx % log_interval == 0:
                 print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                     epoch, batch_idx * len(data), len(train_loader.dataset),
                     100. * batch_idx / len(train_loader), loss.data[0]))
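Note that loss.data[0] is the pre-0.4 PyTorch way to read a scalar out of a loss tensor; on current releases indexing a 0-dim tensor raises an error. If the script is run on a modern install, .item() is the replacement (my note, not part of the commit):

import torch

loss = torch.tensor(0.5)                     # stand-in for the training loss
print('Loss: {:.6f}'.format(loss.item()))    # .item() replaces loss.data[0]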
     # run a test loop
     test_loss = 0
...
...
@@ -81,10 +83,11 @@ def create_nn(batch_size=200, learning_rate=0.01, epochs=10,
         pred = net_out.data.max(1)[1]  # get the index of the max log-probability
         correct += pred.eq(target.data).sum()
-    test_loss /= len(test_loader.dataset)
+    len_test_dataset = len(test_loader.dataset)
+    test_loss /= len_test_dataset
     print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
-        test_loss, correct, len(test_loader.dataset),
-        100. * correct / len(test_loader.dataset)))
+        test_loss, correct, len_test_dataset,
+        100. * correct / len_test_dataset))
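For reference, the same test loop in current PyTorch idiom, hedged as a sketch since the elided lines are not visible here: torch.no_grad() replaces the Variable plumbing, argmax replaces data.max(1)[1], and .item() replaces the tensor indexing.

import torch

def evaluate(net, test_loader, criterion):
    # sketch of the test loop above in post-0.4 idiom
    net.eval()
    test_loss, correct = 0.0, 0
    with torch.no_grad():                          # no autograd bookkeeping
        for data, target in test_loader:
            net_out = net(data.view(-1, 28 * 28))
            test_loss += criterion(net_out, target).item()
            correct += (net_out.argmax(dim=1) == target).sum().item()
    n = len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss / n, correct, n, 100. * correct / n))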
 if __name__ == "__main__":
...
...