PyTorch's official examples load data through datasets and loaders that are already defined. How do you load your own local images and their labels?

Suppose the data takes the following form:

image1 label1

image2 label2

...

imagen labeln

The data I used in my experiment has the following format: one image file name per line, followed by its label, where each label is a 9-dimensional vector (see the parsing sketch after the listing):

1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.304295635957 0.952577642997 0.0614006041909 0.0938333659301 -0.995587916479 0.126405046864 -0.999368204665 0.0355414055005 0.382030624629 0.0
1_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.271224474168 0.962516121742 0.061399602839 0.128727689658 -0.991679979588 0.126495313272 -0.999999890616 0.000467726796359 0.381981952872 0.0
1_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.237868729379 0.971297311632 0.0614713240576 0.163626102983 -0.986522426721 0.1265439964 -0.999400990041 -0.0346072406472 0.382020891324 0.0
1.1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.303575822293 0.95280728383 0.0675229548933 0.0939225945957 -0.995579502714 0.138745857429 -0.999376861795 0.0352971402251 0.410670255038 0.1
1.1_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.270745576918 0.962650940154 0.0674654115238 0.128659340525 -0.991688849436 0.138685653232 -0.999999909615 0.000425170029598 0.410739827476 0.1
1.1_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.23757921143 0.971368168253 0.0674866175928 0.16322766122 -0.986588430204 0.138789623782 -0.999406504329 -0.0344476284471 0.410661183171 0.1
1.2_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.305474635089 0.952200213882 0.0736939767933 0.0939968709874 -0.995572492712 0.150981626608 -0.999370773952 0.0354690875311 0.437620875774 0.2
1.2_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.270346113421 0.962763199836 0.073518963401 0.128433455959 -0.991718129002 0.150964425444 -0.999999924062 0.000389711583812 0.437667827367 0.2
1.2_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.237337349604 0.971427291403 0.0734898449879 0.162895476227 -0.986643331617 0.150931800731 -0.999411541516 -0.0343011761519 0.437608139736 0.2
1.3_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.305514664536 0.952187371137 0.0795990377393 0.0941741911595 -0.995555735115 0.162914965783 -0.999378340534 0.0352552474342 0.462816755558 0.3
1.3_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.272366931798 0.962193459998 0.0796135882128 0.128398130503 -0.991722703221 0.162940731132 -0.999999935257 0.000359841646368 0.462733965419 0.3

...
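
For reference, here is how one of these lines splits into a file name and the 9-dimensional target. This is just a small sketch of the parsing step that the dataset class below performs; note that the final column is dropped there as well.

# one complete line from the listing above
line = ("1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg "
        "0.304295635957 0.952577642997 0.0614006041909 0.0938333659301 "
        "-0.995587916479 0.126405046864 -0.999368204665 0.0355414055005 "
        "0.382030624629 0.0")
cols = line.split()
file_name = cols[0]                     # the image file name
label = [float(v) for v in cols[1:-1]]  # the 9-dimensional label; the last column is discarded, as in the code below
print(file_name, len(label))            # prints the file name and 9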

The complete source program is as follows:

import torch
import torch.nn as nn
import math
import os
from PIL import Image
import random
from torchvision import datasets, transforms
import torch.utils.data as data
from torch.autograd import Variable

torch.cuda.set_device(0)
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
kwargs = {'num_workers': 1, 'pin_memory': True}
batch_size = 8


# load the data
def random_choose_data(label_path):
    random.seed(1)  # fixed seed so the train/test split is the same on every run
    file = open(label_path)
    lines = file.readlines()
    slice_initial = random.sample(lines, 200000)
    slice = list(set(lines) - set(slice_initial))
    random.shuffle(slice)
    train_label = slice[:150000]
    test_label = slice[150000:200000]
    return train_label, test_label  # return the lists and pass them to the custom dataset


# my data loader: returns the image and the corresponding label
def default_loader(path):
    return Image.open(path).convert('RGB')  # operates on a PIL image object


class myImageFloder(data.Dataset):  # inherits from Dataset
    def __init__(self, root, label, transform=None, target_transform=None, loader=default_loader):
        c = 0
        imgs = []
        class_names = ['regression']
        for line in label:  # label is a list of lines
            cls = line.split()  # cls is a list: the file name followed by the label values
            fn = cls.pop(0)
            if os.path.isfile(os.path.join(root, fn)):
                imgs.append((fn, tuple([float(v) for v in cls[:len(cls) - 1]])))
                # keep the first 9 columns as the target and drop the last column;
                # imgs is a list of (file name, label tuple) pairs, one per image,
                # regardless of the label's dimension
                c = c + 1
        print('the total image is', c)
        print(class_names)
        self.root = root
        self.imgs = imgs
        self.classes = class_names
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]  # imgs is a plain list; indexing returns the stored tuple
        img = self.loader(os.path.join(self.root, fn))
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(label), fn

    def __len__(self):
        return len(self.imgs)

    def getName(self):
        return self.classes


mytransform = transforms.Compose([transforms.ToTensor()])  # map [0, 255] to [0, 1]
test_data_root = "/home/ying/data/google_streetview_train_test1"
data_label = "/home/ying/data/google_streetview_train_test1/label.txt"
# test_label = "/home/ying/data/google_streetview_train_test1/label.txt"
train_label, test_label = random_choose_data(data_label)
test_loader = torch.utils.data.DataLoader(
    myImageFloder(root=test_data_root, label=test_label, transform=mytransform),
    batch_size=batch_size, shuffle=True, **kwargs)


def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)  # reduces the channels, doesn't change the size
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=9):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)  # halves the spatial size
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # halves the spatial size
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.fc = nn.Linear(2048, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # block: block class, planes: output channels, blocks: number of blocks
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion  # the input channel count becomes 4x for Bottleneck
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


def resnet50(pretrained=True):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    model.load_state_dict(torch.load('./resnet50_20170907_state_dict.pth'))
    return model


cnn = resnet50(pretrained=True)  # the output dimension is 9
cnn.cuda()
cnn.eval()
criterion = nn.MSELoss().cuda()
for i, (test_images, test_labels, fn) in enumerate(test_loader):  # i is the batch index; the tuple is what __getitem__ returns
    test_images = Variable(test_images.cuda())
    test_labels = Variable(test_labels.cuda())
    outputs = cnn(test_images)
    print(outputs.data[0])
    print(fn)
    loss = criterion(outputs, test_labels)
    print("Iter [%d/%d] Test_Loss: %.4f" % (i + 1, 781, loss.data[0]))

Focus on the part of the code that defines the dataset/DataLoader and returns the image name:

def random_choose_data(label_path):
    random.seed(1)  # fixed seed so the train/test split is the same on every run
    file = open(label_path)
    lines = file.readlines()
    slice_initial = random.sample(lines, 200000)
    slice = list(set(lines) - set(slice_initial))
    random.shuffle(slice)
    train_label = slice[:150000]
    test_label = slice[150000:200000]
    return train_label, test_label  # return the lists and pass them to the custom dataset


# my data loader: returns the image and the corresponding label
def default_loader(path):
    return Image.open(path).convert('RGB')  # operates on a PIL image object


class myImageFloder(data.Dataset):  # inherits from Dataset
    def __init__(self, root, label, transform=None, target_transform=None, loader=default_loader):
        c = 0
        imgs = []
        class_names = ['regression']
        for line in label:  # label is a list of lines
            cls = line.split()  # cls is a list: the file name followed by the label values
            fn = cls.pop(0)
            if os.path.isfile(os.path.join(root, fn)):
                imgs.append((fn, tuple([float(v) for v in cls[:len(cls) - 1]])))
                # keep the first 9 columns as the target and drop the last column;
                # imgs is a list of (file name, label tuple) pairs, one per image,
                # regardless of the label's dimension
                c = c + 1
        print('the total image is', c)
        print(class_names)
        self.root = root
        self.imgs = imgs
        self.classes = class_names
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]  # imgs is a plain list; indexing returns the stored tuple
        img = self.loader(os.path.join(self.root, fn))
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(label), fn  # return the image data, its label, and its file name

    def __len__(self):
        return len(self.imgs)

    def getName(self):
        return self.classes

In essence this just inherits from the Dataset class and overrides its two methods, __getitem__ and __len__; the image and label returned from __getitem__ are torch.Tensor objects, which is all the DataLoader needs (the file name rides along as a plain string).
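
To make that minimal contract explicit, here is a bare-bones sketch of a custom regression dataset. The file name my_labels.txt and its whitespace-separated "name value value ..." layout are assumptions for illustration, not the original label file.

import os
import torch
from PIL import Image
import torch.utils.data as data


class MinimalRegressionDataset(data.Dataset):
    """Minimal custom Dataset: only __getitem__ and __len__ are required."""
    def __init__(self, root, label_file, transform=None):
        self.root = root
        self.transform = transform
        self.samples = []
        with open(label_file) as f:
            for line in f:
                parts = line.split()  # file name followed by float targets
                self.samples.append((parts[0], [float(v) for v in parts[1:]]))

    def __getitem__(self, index):
        name, values = self.samples[index]
        img = Image.open(os.path.join(self.root, name)).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(values), name  # the name comes back through the DataLoader as well

    def __len__(self):
        return len(self.samples)

Wrapping this class in a torch.utils.data.DataLoader then works exactly like the myImageFloder example above.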

Now look at how the DataLoader is defined and how data is loaded through it:

mytransform = transforms.Compose([transforms.ToTensor()])  # map [0, 255] to [0, 1]
test_data_root = "/home/ying/data/google_streetview_train_test1"
data_label = "/home/ying/data/google_streetview_train_test1/label.txt"
# test_label = "/home/ying/data/google_streetview_train_test1/label.txt"
train_label, test_label = random_choose_data(data_label)
test_loader = torch.utils.data.DataLoader(
    myImageFloder(root=test_data_root, label=test_label, transform=mytransform),
    batch_size=batch_size, shuffle=True, **kwargs)
...
for i, (test_images, test_labels, fn) in enumerate(test_loader):  # i is the batch index; the tuple is what __getitem__ returns
    test_images = Variable(test_images.cuda())
    test_labels = Variable(test_labels.cuda())
    outputs = cnn(test_images)
    print(outputs.data[0])
    print(fn)
    loss = criterion(outputs, test_labels)
    print("Iter [%d/%d] Test_Loss: %.4f" % (i + 1, 781, loss.data[0]))

The __getitem__ just defined in myImageFloder is exactly what gets unpacked per batch in `for i, (test_images, test_labels, fn) in enumerate(test_loader):`, where the leading i is the batch index produced by enumerate.
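
Concretely, with batch_size = 8 the default collate function stacks what __getitem__ returns, so each iteration yields roughly the following (a sketch; H and W are simply the image size on disk, since the only transform here is ToTensor):

for i, (test_images, test_labels, fn) in enumerate(test_loader):
    # test_images: FloatTensor of shape [8, 3, H, W] -- the stacked image tensors
    # test_labels: FloatTensor of shape [8, 9]       -- the stacked 9-dimensional labels
    # fn:          a list of 8 file-name strings     -- strings are collected, not stacked
    break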

This makes it possible, at test time, to see which samples have a large error and print them out.
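
As a small sketch of that idea (the 0.1 threshold is illustrative, not from the original script), a per-sample mean squared error can be computed inside the test loop and the offending file names printed:

# inside the test loop, after `outputs = cnn(test_images)`
per_sample_err = ((outputs - test_labels) ** 2).mean(dim=1)  # one MSE value per image, shape [batch_size]
for name, err in zip(fn, per_sample_err.data.tolist()):
    if err > 0.1:                                            # illustrative threshold
        print("high error sample:", name, err)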
