The data-loading examples in the official PyTorch documentation use predefined datasets and loaders. How do you load your own local images together with their labels?

Suppose the data is given in the form

image1 label1

image2 label2

...

imagen labeln

The data used in my experiment has the following format: each image file name is paired with one label, and each label is a 9-dimensional vector (each line also carries one trailing column, which is dropped when the file is parsed; a minimal parsing sketch follows the sample lines):

1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.304295635957 0.952577642997 0.0614006041909 0.0938333659301 -0.995587916479 0.126405046864 -0.999368204665 0.0355414055005 0.382030624629 0.0
1_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.271224474168 0.962516121742 0.061399602839 0.128727689658 -0.991679979588 0.126495313272 -0.999999890616 0.000467726796359 0.381981952872 0.0
1_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.237868729379 0.971297311632 0.0614713240576 0.163626102983 -0.986522426721 0.1265439964 -0.999400990041 -0.0346072406472 0.382020891324 0.0
1.1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.303575822293 0.95280728383 0.0675229548933 0.0939225945957 -0.995579502714 0.138745857429 -0.999376861795 0.0352971402251 0.410670255038 0.1
1.1_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.270745576918 0.962650940154 0.0674654115238 0.128659340525 -0.991688849436 0.138685653232 -0.999999909615 0.000425170029598 0.410739827476 0.1
1.1_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.23757921143 0.971368168253 0.0674866175928 0.16322766122 -0.986588430204 0.138789623782 -0.999406504329 -0.0344476284471 0.410661183171 0.1
1.2_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.305474635089 0.952200213882 0.0736939767933 0.0939968709874 -0.995572492712 0.150981626608 -0.999370773952 0.0354690875311 0.437620875774 0.2
1.2_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.270346113421 0.962763199836 0.073518963401 0.128433455959 -0.991718129002 0.150964425444 -0.999999924062 0.000389711583812 0.437667827367 0.2
1.2_2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.237337349604 0.971427291403 0.0734898449879 0.162895476227 -0.986643331617 0.150931800731 -0.999411541516 -0.0343011761519 0.437608139736 0.2
1.3_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.305514664536 0.952187371137 0.0795990377393 0.0941741911595 -0.995555735115 0.162914965783 -0.999378340534 0.0352552474342 0.462816755558 0.3
1.3_0_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg 0.272366931798 0.962193459998 0.0796135882128 0.128398130503 -0.991722703221 0.162940731132 -0.999999935257 0.000359841646368 0.462733965419 0.3

...
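To make the format concrete, here is a minimal sketch (not part of the program below; the helper name parse_label_line is made up) showing how one such line splits into a file name, the 9 label values, and the trailing column that the Dataset below discards:

def parse_label_line(line):
    parts = line.split()
    fn = parts[0]                            # image file name
    values = [float(v) for v in parts[1:]]
    label = values[:-1]                      # 9 label values; the trailing column is dropped
    return fn, label

# usage sketch with the first sample line above
fn, label = parse_label_line(
    "1_-2_pitch_100_yaw_0_lat_29.7553171_lng_-95.3675684.jpg "
    "0.304295635957 0.952577642997 0.0614006041909 0.0938333659301 "
    "-0.995587916479 0.126405046864 -0.999368204665 0.0355414055005 "
    "0.382030624629 0.0")
print(fn, len(label))  # prints the file name and 9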

The full source program is as follows:

import torch
import torch.nn as nn
import math
import os
from PIL import Image
import random
from torchvision import datasets, transforms
import torch.utils.data as data
from torch.autograd import Variable

torch.cuda.set_device(0)
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
kwargs = {'num_workers': 1, 'pin_memory': True}
batch_size = 8


# load the data
def random_choose_data(label_path):
    random.seed(1)
    file = open(label_path)
    lines = file.readlines()
    slice_initial = random.sample(lines, 200000)  # with the fixed seed, the same lines are drawn every run
    slice = list(set(lines) - set(slice_initial))
    random.shuffle(slice)
    train_label = slice[:150000]
    test_label = slice[150000:200000]
    return train_label, test_label  # return the two lists and pass them into the Dataset


# my data loader: returns the image data and the corresponding label
def default_loader(path):
    return Image.open(path).convert('RGB')  # the object operated on is a PIL image


class myImageFloder(data.Dataset):  # inherit from torch.utils.data.Dataset
    def __init__(self, root, label, transform=None, target_transform=None, loader=default_loader):
        # fh = open(label)
        c = 0
        imgs = []
        class_names = ['regression']
        for line in label:  # label is a list of lines
            cls = line.split()  # cls is a list: file name followed by the label values
            fn = cls.pop(0)
            if os.path.isfile(os.path.join(root, fn)):
                imgs.append((fn, tuple([float(v) for v in cls[:len(cls) - 1]])))
                # imgs is a list of (file name, label tuple) pairs: every image gets one label,
                # whatever the label's dimension; the last column of each line is dropped
                c = c + 1
        print('the total image is', c)
        print(class_names)
        self.root = root
        self.imgs = imgs
        self.classes = class_names
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]  # imgs is a plain list, but indexing it returns one (name, label) pair
        img = self.loader(os.path.join(self.root, fn))
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(label), fn

    def __len__(self):
        return len(self.imgs)

    def getName(self):
        return self.classes


mytransform = transforms.Compose([transforms.ToTensor()])  # map pixel values from [0, 255] to [0, 1]
test_data_root = "/home/ying/data/google_streetview_train_test1"
data_label = "/home/ying/data/google_streetview_train_test1/label.txt"
# test_label = "/home/ying/data/google_streetview_train_test1/label.txt"
train_label, test_label = random_choose_data(data_label)
test_loader = torch.utils.data.DataLoader(
    myImageFloder(root=test_data_root, label=test_label, transform=mytransform),
    batch_size=batch_size, shuffle=True, **kwargs)


def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)  # reduce the channels, keep the spatial size
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=9):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)  # halves the spatial size
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # halves the spatial size
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        self.fc = nn.Linear(2048, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # block: block class, planes: output channels, blocks: number of blocks
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion  # the input channel count becomes 4x
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


def resnet50(pretrained=True):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    # model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    model.load_state_dict(torch.load('./resnet50_20170907_state_dict.pth'))
    return model


cnn = resnet50(pretrained=True)  # the output dimension is 9
cnn.cuda()
cnn.eval()
criterion = nn.MSELoss().cuda()

for i, (test_images, test_labels, fn) in enumerate(test_loader):  # i is the batch index; the tuple holds one batch
    test_images = Variable(test_images.cuda())
    test_labels = Variable(test_labels.cuda())
    outputs = cnn(test_images)
    print(outputs.data[0])
    print(fn)
    loss = criterion(outputs, test_labels)
    print("Iter [%d/%d] Test_Loss: %.4f" % (i + 1, 781, loss.data[0]))

Focus on the code that defines the Dataset/DataLoader and returns the image name:

def random_choose_data(label_path):
    random.seed(1)
    file = open(label_path)
    lines = file.readlines()
    slice_initial = random.sample(lines, 200000)  # with the fixed seed, the same lines are drawn every run
    slice = list(set(lines) - set(slice_initial))
    random.shuffle(slice)
    train_label = slice[:150000]
    test_label = slice[150000:200000]
    return train_label, test_label  # return the two lists and pass them into the Dataset


# my data loader: returns the image data and the corresponding label
def default_loader(path):
    return Image.open(path).convert('RGB')  # the object operated on is a PIL image


class myImageFloder(data.Dataset):  # inherit from torch.utils.data.Dataset
    def __init__(self, root, label, transform=None, target_transform=None, loader=default_loader):
        # fh = open(label)
        c = 0
        imgs = []
        class_names = ['regression']
        for line in label:  # label is a list of lines
            cls = line.split()  # cls is a list: file name followed by the label values
            fn = cls.pop(0)
            if os.path.isfile(os.path.join(root, fn)):
                imgs.append((fn, tuple([float(v) for v in cls[:len(cls) - 1]])))
                # imgs is a list of (file name, label tuple) pairs: every image gets one label,
                # whatever the label's dimension; the last column of each line is dropped
                c = c + 1
        print('the total image is', c)
        print(class_names)
        self.root = root
        self.imgs = imgs
        self.classes = class_names
        self.transform = transform
        self.target_transform = target_transform
        self.loader = loader

    def __getitem__(self, index):
        fn, label = self.imgs[index]  # imgs is a plain list, but indexing it returns one (name, label) pair
        img = self.loader(os.path.join(self.root, fn))
        if self.transform is not None:
            img = self.transform(img)
        return img, torch.Tensor(label), fn  # return the image data, its label and its file name here

    def __len__(self):
        return len(self.imgs)

    def getName(self):
        return self.classes

In essence, all that is needed is to subclass Dataset and implement its two methods, __getitem__ and __len__, making sure the image and label returned are of type torch.Tensor (the file name can stay a plain string).
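Reduced to those essentials, a self-contained sketch of such a regression Dataset (using made-up in-memory samples instead of image files; the class name MyRegressionDataset is made up) looks like this:

import torch
import torch.utils.data as data

class MyRegressionDataset(data.Dataset):
    # a Dataset only needs __getitem__ and __len__
    def __init__(self, samples):
        # samples: list of (input tensor, 9-dim label list) pairs
        self.samples = samples

    def __getitem__(self, index):
        x, label = self.samples[index]
        return x, torch.Tensor(label)  # return tensors so the DataLoader can stack them into batches

    def __len__(self):
        return len(self.samples)

# usage sketch with random data
samples = [(torch.randn(3, 224, 224), [0.0] * 9) for _ in range(4)]
loader = data.DataLoader(MyRegressionDataset(samples), batch_size=2)
for x, y in loader:
    print(x.size(), y.size())  # torch.Size([2, 3, 224, 224]) torch.Size([2, 9])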

Now look at how the DataLoader is defined and how data is loaded through it:

mytransform = transforms.Compose([transforms.ToTensor()])  # map pixel values from [0, 255] to [0, 1]
test_data_root = "/home/ying/data/google_streetview_train_test1"
data_label = "/home/ying/data/google_streetview_train_test1/label.txt"
# test_label = "/home/ying/data/google_streetview_train_test1/label.txt"
train_label, test_label = random_choose_data(data_label)
test_loader = torch.utils.data.DataLoader(
    myImageFloder(root=test_data_root, label=test_label, transform=mytransform),
    batch_size=batch_size, shuffle=True, **kwargs)
...
for i, (test_images, test_labels, fn) in enumerate(test_loader):  # i is the batch index; the tuple holds one batch
    test_images = Variable(test_images.cuda())
    test_labels = Variable(test_labels.cuda())
    outputs = cnn(test_images)
    print(outputs.data[0])
    print(fn)
    loss = criterion(outputs, test_labels)
    print("Iter [%d/%d] Test_Loss: %.4f" % (i + 1, 781, loss.data[0]))

The __getitem__ just defined in myImageFloder is exactly what produces the objects unpacked in for i, (test_images, test_labels, fn) in enumerate(test_loader):, after the DataLoader has batched them; the leading i is simply the index supplied by enumerate.
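To see what the DataLoader does with the (img, label, fn) triples, here is a small self-contained sketch of its default collate function (default_collate is what DataLoader uses when no collate_fn is passed; the exact container type returned for the strings may differ between PyTorch versions):

import torch
from torch.utils.data.dataloader import default_collate

# two fake samples shaped like what __getitem__ returns
batch = [(torch.zeros(3, 4, 4), torch.zeros(9), 'a.jpg'),
         (torch.ones(3, 4, 4), torch.ones(9), 'b.jpg')]
images, labels, names = default_collate(batch)
print(images.size())  # torch.Size([2, 3, 4, 4]) -- tensors are stacked along a new batch dimension
print(labels.size())  # torch.Size([2, 9])
print(names)          # ['a.jpg', 'b.jpg'] -- the file names simply come back as a sequence of strings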

This makes it possible, while testing the model, to see which samples produce a large error and to print them out together with their file names.
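As a sketch of how that could be done (written with the modern tensor API, whereas the program above still uses the old Variable wrapper; the helper name report_worst_in_batch is made up), a per-image MSE can be computed inside the loop and the worst file name printed:

import torch

def report_worst_in_batch(outputs, labels, names):
    # print the file name with the largest per-image MSE in one batch;
    # intended to be called inside the test loop above
    per_sample_err = ((outputs - labels) ** 2).mean(dim=1)  # shape [batch_size]
    worst = per_sample_err.argmax().item()
    print('worst sample: %s  error: %.4f' % (names[worst], per_sample_err[worst].item()))

# usage sketch with dummy data shaped like one batch from test_loader
report_worst_in_batch(torch.randn(8, 9), torch.randn(8, 9), ['img_%d.jpg' % k for k in range(8)])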
