import torch
import torchvision
import torch.nn as nn
import numpy as np
import torchvision.transforms as transforms


# ================================================================== #
#                         Table of Contents                          #
# ================================================================== #
# 1. Basic autograd example 1
# 2. Basic autograd example 2
# 3. Loading data from numpy
# 4. Input pipeline
# 5. Input pipeline for custom dataset
# 6. Pretrained model
# 7. Save and load the model


# ================================================================== #
#                     1. Basic autograd example 1                    #
# ================================================================== #

# Create tensors.
x = torch.tensor(1., requires_grad=True)
w = torch.tensor(2., requires_grad=True)
b = torch.tensor(3., requires_grad=True)

# Build a computational graph.
y = w * x + b    # y = 2 * x + 3

# Compute gradients.
y.backward()

# Print out the gradients.
print(x.grad) # x.grad = 2
print(w.grad) # w.grad = 1
print(b.grad)    # b.grad = 1
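
# Gradients accumulate across backward() calls -- a minimal sketch of
# this behavior (the tensor x2 below is illustrative, not part of the
# example above):
x2 = torch.tensor(1., requires_grad=True)
(2 * x2).backward()
print(x2.grad)     # tensor(2.)
(2 * x2).backward()
print(x2.grad)     # tensor(4.) -- added to the previous gradient
x2.grad.zero_()    # reset before the next backward pass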

# ================================================================== #
#                     2. Basic autograd example 2                    #
# ================================================================== #

# Create tensors of shape (10, 3) and (10, 2).
x = torch.randn(10, 3)
y = torch.randn(10, 2)

# Build a fully connected layer.
linear = nn.Linear(3, 2)    # y = x * weight^T + bias
print('w: ', linear.weight)    # shape (out_features, in_features)
print('b: ', linear.bias)      # shape (out_features,)

# Build loss function and optimizer.
criterion = nn.MSELoss()    # mean squared error
optimizer = torch.optim.SGD(linear.parameters(), lr=0.01)

# Forward pass.
pred = linear(x)

# Compute loss.
loss = criterion(pred, y)
print('loss: ', loss.item())

# Backward pass.
loss.backward()

# Print out the gradients.
print('dL/dw: ', linear.weight.grad)
print('dL/db: ', linear.bias.grad)

# 1-step gradient descent (one forward and backward pass).
optimizer.step()

# You can also perform gradient descent at the low level:
# linear.weight.data.sub_(0.01 * linear.weight.grad.data)
# linear.bias.data.sub_(0.01 * linear.bias.grad.data)

# Print out the loss after 1-step gradient descent.
pred = linear(x)
loss = criterion(pred, y)
print('loss after 1 step optimization: ', loss.item())
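
# A minimal sketch extending the single update above into a short
# training loop; zero_grad() is required because gradients accumulate:
for step in range(100):
    pred = linear(x)
    loss = criterion(pred, y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print('loss after 100 steps: ', loss.item())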

# ================================================================== #
#                     3. Loading data from numpy                     #
# ================================================================== #

# Create a numpy array.
x = np.array([[1, 2], [3, 4]])

# Convert the numpy array to a torch tensor.
y = torch.from_numpy(x)

# Convert the torch tensor to a numpy array.
z = y.numpy()
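
# Note: torch.from_numpy() and .numpy() share memory with the source
# array, so mutating one side is visible on the other -- a quick sketch:
x[0, 0] = 100
print(y[0, 0])    # tensor(100) -- the tensor reflects the change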

# ================================================================== #
#                         4. Input pipeline                          #
# ================================================================== #

# Download and construct CIFAR-10 dataset.
train_dataset = torchvision.datasets.CIFAR10(root='../../data/',
train=True,
transform=transforms.ToTensor(),
                                             download=True)

# Fetch one data pair (read data from disk).
image, label = train_dataset[0]
print(image.size())
print(label)

# Data loader (this provides queues and threads in a very simple way).
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=64,
                                           shuffle=True)

# When iteration starts, queue and thread start to load data from files.
data_iter = iter(train_loader)

# Mini-batch images and labels.
images, labels = next(data_iter)

# Actual usage of the data loader is as below.
for batch_idx, (images, labels) in enumerate(train_loader):
    # Training code should be written here.
    pass
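
# A common variant (sketch): compose several transforms. The mean/std
# below are commonly used CIFAR-10 statistics, not computed here:
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465),
                         (0.2470, 0.2435, 0.2616)),
])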

# ================================================================== #
#                 5. Input pipeline for custom dataset               #
# ================================================================== #

# You should build your custom dataset as below.
class CustomDataset(torch.utils.data.Dataset):
    def __init__(self):
        # TODO
        # 1. Initialize file paths or a list of file names.
        pass
        # xy = np.loadtxt('../../data/diabets.csv.gz')
        # self.len = xy.shape[0]
        # self.x_data = torch.from_numpy(xy[:, 0:-1])
        # self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        # TODO
        # 1. Read one sample from file (e.g. using numpy.fromfile, PIL.Image.open).
        # 2. Preprocess the data (e.g. torchvision.transforms).
        # 3. Return a data pair (e.g. image and label).
        pass
        # return self.x_data[index], self.y_data[index]

    def __len__(self):
        # You should change 0 to the total size of your dataset.
        return 0
        # return self.len

# You can then use the prebuilt data loader.
custom_dataset = CustomDataset()
train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                           batch_size=32,
                                           shuffle=True)
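
# A filled-in sketch of the template above, assuming a hypothetical CSV
# file whose last column holds the label (the path is illustrative):
class CSVDataset(torch.utils.data.Dataset):
    def __init__(self, path):
        xy = np.loadtxt(path, delimiter=',', dtype=np.float32)
        self.x_data = torch.from_numpy(xy[:, 0:-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return len(self.x_data)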

# ================================================================== #
#                        6. Pretrained model                         #
# ================================================================== #

# Download and load the pretrained ResNet-18.
resnet = torchvision.models.resnet18(pretrained=True)
# (Newer torchvision versions replace pretrained=True with a weights= argument.)

# If you want to finetune only the top layer of the model, set as below.
for param in resnet.parameters():
    param.requires_grad = False

# Replace the top layer for finetuning.
resnet.fc = nn.Linear(resnet.fc.in_features, 100)    # 100 is an example.

# Forward pass.
images = torch.randn(64, 3, 224, 224)
outputs = resnet(images)
print(outputs.size())    # (64, 100)
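
# When finetuning only the top layer, a matching optimizer sketch is to
# pass just the new layer's parameters (a freshly constructed nn.Linear
# has requires_grad=True by default):
optimizer = torch.optim.SGD(resnet.fc.parameters(), lr=0.001)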

# ================================================================== #
#                      7. Save and load the model                    #
# ================================================================== #

# Save and load the entire model.
torch.save(resnet, 'model.ckpt')
model = torch.load('model.ckpt')

# Save and load only the model parameters (recommended).
torch.save(resnet.state_dict(), 'params.ckpt')
resnet.load_state_dict(torch.load('params.ckpt'))
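
# Restoring parameters into a freshly constructed model -- a sketch of
# the usual pattern (the fc layer must match the saved 100-way shape):
new_resnet = torchvision.models.resnet18()
new_resnet.fc = nn.Linear(new_resnet.fc.in_features, 100)
new_resnet.load_state_dict(torch.load('params.ckpt'))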


# ================================================================== #
#                         Linear Regression                          #
# ================================================================== #

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

# Linear regression model
model = nn.Linear(input_size, output_size)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    # Convert numpy arrays to torch tensors
    inputs = torch.from_numpy(x_train)
    targets = torch.from_numpy(y_train)

    # Forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch+1) % 5 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

# Plot the graph
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
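
# A quick sketch of inspecting the fitted line; exact values depend on
# the run (weight and bias are standard nn.Linear attributes):
print('learned weight: ', model.weight.item())
print('learned bias: ', model.bias.item())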


# ================================================================== #
#                        Logistic Regression                         #
# ================================================================== #

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Hyper-parameters
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001

# MNIST dataset (images and labels)
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loader (input pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# Logistic regression model
model = nn.Linear(input_size, num_classes)

# Loss and optimizer
# nn.CrossEntropyLoss() computes softmax internally
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
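
# Equivalence sketch: CrossEntropyLoss on raw logits matches LogSoftmax
# followed by NLLLoss (the tensors below are illustrative only):
_logits = torch.randn(4, num_classes)
_targets = torch.randint(0, num_classes, (4,))
_a = criterion(_logits, _targets)
_b = nn.NLLLoss()(nn.LogSoftmax(dim=1)(_logits), _targets)
# _a and _b agree up to floating-point error.

# Train the model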
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape images to (batch_size, input_size)
        images = images.reshape(-1, 28*28)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
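
# A minimal inference sketch: classify a single test image with the
# trained model (index 0 is arbitrary):
img, lbl = test_dataset[0]
with torch.no_grad():
    logits = model(img.reshape(-1, 28*28))
print('predicted: {}, actual: {}'.format(logits.argmax(dim=1).item(), lbl))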


# ================================================================== #
#                     Feedforward Neural Network                     #
# ================================================================== #

import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Hyper-parameters
input_size = 784
hidden_size = 500
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001

# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)

test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())

# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)

# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = NeuralNet(input_size, hidden_size, num_classes).to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
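
# An equivalent definition sketch using nn.Sequential, for comparison
# with the subclass above (same layers and shapes):
seq_model = nn.Sequential(
    nn.Linear(input_size, hidden_size),
    nn.ReLU(),
    nn.Linear(hidden_size, num_classes),
).to(device)

# Train the model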
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)

        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
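
# Restoring on a possibly different device -- a sketch using map_location
# so a GPU-trained checkpoint also loads on a CPU-only machine:
restored = NeuralNet(input_size, hidden_size, num_classes)
restored.load_state_dict(torch.load('model.ckpt', map_location='cpu'))
restored.eval()    # switch to evaluation mode before inference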
