PyTorch documentation study notes
Notes from reading the official documentation: http://pytorch.org/docs/0.3.0/index.html
1. torch.Tensor
from __future__ import print_function
import torch
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
################ Tensors ##################################
x=torch.Tensor(5,3)
torch.is_tensor(x)
torch.is_storage(x)
torch.numel(x) # returns an int: the total number of elements
torch.eye(2)
torch.eye(2,3)
torch.from_numpy(np.array([1,2,3])) #torch.IntTensor of size 3
torch.from_numpy(np.array([1.0,2,3])) #torch.DoubleTensor of size 3
print(torch.linspace(0,10,steps=3)) #torch.FloatTensor of size 3 0 5 10
print(torch.linspace(0,10,3)) #torch.FloatTensor of size 3 0 5 10
# steps: an integer > 1, the number of sample points between start and end (inclusive)
print(torch.logspace(0,1,4)) #torch.FloatTensor of size 4; like linspace but base 10: points between 10^0 and 10^1
print(torch.ones(2,2)) #torch.FloatTensor of size 2x2
print(torch.ones(2,2,1,5,8,2,8,10)) #torch.FloatTensor of size 2x2x1x5x8x2x8x10
print(torch.ones_like(x)) #torch.FloatTensor of size 5x3
print(torch.arange(0,3,1)) #torch.FloatTensor of size 3: 0 1 2; the result is iterable
print(torch.zeros(2)) #torch.FloatTensor of size 2 0 0
print(torch.zeros_like(x)) #torch.FloatTensor of size 5x3
############## Indexing, Slicing, Joining, Mutating Ops ##############################
print(torch.cat((x,x,x),0)) #torch.FloatTensor of size 15x3; concatenates the given Tensors
print(torch.cat((x,x,x),1)) #torch.FloatTensor of size 5x9
print(torch.chunk(x,2)) # splits into chunks; returns a tuple with one tensor per chunk
# (
# 0 0 0
# 0 0 0
# 0 0 0
# [torch.FloatTensor of size 3x3]
# ,
# 1.00000e-37 *
# 0.0000 0.0000 0.0000
# 0.0000 7.4058 0.0000
# [torch.FloatTensor of size 2x3]
# )
t=torch.Tensor([[1,2],[3,4]])
# torch.gather(input, dim, index, out=None) → Tensor; dim selects whether to gather along rows or columns
print(torch.gather(t,0,torch.LongTensor([[0,0],[0,0]]))) #torch.FloatTensor of size 2x2
# 1 2
# 1 2
print(torch.gather(t,1,torch.LongTensor([[0,0],[0,0]]))) #torch.FloatTensor of size 2x2
# 1 1
# 3 3
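The indexing rule is easier to see written out; a small sketch of my own (not from the docs): for dim=0, out[i][j] = input[index[i][j]][j], and for dim=1, out[i][j] = input[i][index[i][j]].
idx=torch.LongTensor([[0,1],[1,0]])
print(torch.gather(t,1,idx)) #torch.FloatTensor of size 2x2
# 1 2   out[0][0]=t[0][0], out[0][1]=t[0][1]
# 4 3   out[1][0]=t[1][1], out[1][1]=t[1][0]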
# torch.index_select(input, dim, index, out=None) → Tensor: selects whole rows or columns; to select both rows and columns, do it in two steps
# Note: as with gather above, index must be a LongTensor
print(torch.index_select(t,0,torch.LongTensor([0]))) #torch.FloatTensor of size 1x2 1 2
print(torch.index_select(t,1,torch.LongTensor([0]))) #torch.FloatTensor of size 2x1 1 3
print(torch.index_select(t,1,torch.LongTensor([0,1]))) #torch.FloatTensor of size 2x2
# 1 2
# 3 4
print(t.ge(1)) #torch.ByteTensor of size 2x2: element-wise test t >= 1
# 1 1
# 1 1
print(t.ge(0.5)) #torch.ByteTensor of size 2x2
# 1 1
# 1 1
print(t.ge(2.5)) #torch.ByteTensor of size 2x2
# 0 0
# 1 1
print(t.ge(5)) #torch.ByteTensor of size 2x2
# 0 0
# 0 0
# torch.masked_select(input, mask, out=None) → Tensor: selects the elements where mask is 1
print(torch.masked_select(t,t.ge(2.5))) #torch.FloatTensor of size 2
# 3 4
for i in torch.masked_select(t,t.ge(1.5)): # the result is iterable
print(i)
# 2.0
# 3.0
# 4.0
# torch.nonzero(input, out=None) → LongTensor: positions of the nonzero elements, shape N x ndim where N is the number of nonzero elements
print(torch.nonzero(torch.Tensor([1,2,3,0,4]))) #torch.LongTensor of size 4x1
# 0
# 1
# 2
# 4
non_zero=torch.nonzero(torch.Tensor([[0.6, 0.0, 0.0, 0.0],
[0.0, 0.4, 0.0, 0.0],
[0.0, 0.0, 1.2, 0.0],
[0.0, 0.0, 0.0,-0.4]]))
print(non_zero) #torch.LongTensor of size 4x2
# 0 0
# 1 1
# 2 2
# 3 3
non_zero=torch.nonzero(torch.Tensor([[0.6, 0.0, 0.0, 0.0],
[2, 0.4, 0.0, 0.0],
[3, 0.0, 1.2, 0.0],
[4, 0.0, 0.0,-0.4]]))
print(non_zero) #torch.LongTensor of size 7x2
# 0 0
# 1 0
# 1 1
# 2 0
# 2 2
# 3 0
# 3 3
# torch.split(tensor, split_size, dim=0): like torch.chunk above, but split_size is the size of each chunk rather than the number of chunks
print(torch.split(x,2))
# (
# 0 0 0
# 0 0 0
# [torch.FloatTensor of size 2x3]
# ,
# 1.00000e-42 *
# 0.0000 0.0000 0.0000
# 0.0000 2.0011 0.0000
# [torch.FloatTensor of size 2x3]
# ,
# 1.00000e-38 *
# 0.0000 1.1112 0.0000
# [torch.FloatTensor of size 1x3]
# )
# torch.squeeze(input, dim=None, out=None)
# removes dimensions of size 1; e.g. if x, y, z data is 3-D but the z dimension has extent 1, it reduces to 2-D
torch_squeeze=torch.zeros(2,1,2,1,2)
print(torch_squeeze) #torch.FloatTensor of size 2x1x2x1x2
print(torch_squeeze.size()) #torch.Size([2, 1, 2, 1, 2])
squeeze_one=torch.squeeze(torch_squeeze)
print(squeeze_one) #torch.FloatTensor of size 2x2x2
squeeze_one=torch.squeeze(torch_squeeze,-2) # dim may be negative: any value in [-5, 4] for this 5-D tensor
print(squeeze_one) #torch.FloatTensor of size 2x1x2x2
squeeze_one=torch.squeeze(torch_squeeze,1)
print(squeeze_one) #torch.FloatTensor of size 2x2x1x2
# torch.stack(sequence, dim=0, out=None)
# Concatenates sequence of tensors along a new dimension.
print(x)
print(torch.stack((x,x),1)) #torch.FloatTensor of size 5x2x3
print(torch.stack([x,x],dim=2)) #torch.FloatTensor of size 5x3x2
# torch.t(input, out=None) → Tensor: transpose
print(torch.t(x)) #torch.FloatTensor of size 3x5
#print(torch.t(torch.Tensor(1,2,3,4))) #RuntimeError: t() expects a 2D tensor, but self is 4D
# torch.take(input, indices) → Tensor: treats input as a 1-D tensor and selects elements by indices
print(torch.take(x,torch.LongTensor([0,2,5]))) #torch.FloatTensor of size 3
# torch.transpose(input, dim0, dim1, out=None) → Tensor
y=torch.Tensor(1,2,3,4)
print(y) #torch.FloatTensor of size 1x2x3x4
print(torch.transpose(y,1,3)) #torch.FloatTensor of size 1x4x3x2: swaps dims 1 and 3, 1x2x3x4 -> 1x4x3x2
# torch.unbind(tensor, dim=0): removes a tensor dimension
# returns a tuple of all slices along the removed dimension
print(torch.unbind(y)) # a tuple containing one torch.FloatTensor of size 2x3x4; default dim=0
print(torch.unbind(y,2)) # a tuple of three tensors of size 1x2x4
# torch.unsqueeze(input, dim, out=None): inserts a dimension of size 1 at the given position
m=torch.Tensor([1,2,3,4])
print(m) #torch.FloatTensor of size 4
m_zero=torch.unsqueeze(m,0)
print(m_zero) #torch.FloatTensor of size 1x4
m_one=torch.unsqueeze(m,1)
print(m_one) #torch.FloatTensor of size 4x1
m_zero_to_m=torch.squeeze(m_zero)
print(m_zero_to_m) #torch.FloatTensor of size 4
print(m==m_zero_to_m) #torch.ByteTensor of size 4
# 1
# 1
# 1
# 1
print(m.equal(m_zero_to_m)) #True
2. Random sampling
import torch
################ Random sampling ##################################
print(torch.manual_seed(1)) #<torch._C.Generator object at 0x0000023ED56BD470>
print(torch.manual_seed(2)) #<torch._C.Generator object at 0x0000023F7532D470>
print(torch.initial_seed()) #2: returns the initial seed for generating random numbers
print(torch.get_rng_state()) #torch.ByteTensor of size 5048: the RNG state as a byte tensor
# torch.set_rng_state(new_state)
print(torch.set_rng_state(torch.get_rng_state())) # sets the RNG state; returns None
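A small sketch of my own showing how get_rng_state/set_rng_state pair up: restoring a saved state replays the same random numbers.
state=torch.get_rng_state()
b=torch.rand(2)
torch.set_rng_state(state)
c=torch.rand(2)
print(b.equal(c)) #True: restoring the saved state replays the same draw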
# torch.bernoulli(input, out=None) → Tensor: draws 0/1 samples, where input holds the probabilities
a=torch.Tensor(3,3).uniform_(0,1) #torch.FloatTensor of size 3x3
# creates an uninitialized 3x3 Tensor, then fills it in place with Uniform(0,1) samples
print(torch.bernoulli(a)) #torch.FloatTensor of size 3x3
# 1 1 1
# 0 1 0
# 0 0 1
a=torch.ones(3,3)
print(torch.bernoulli(a)) #torch.FloatTensor of size 3x3
# 1 1 1
# 1 1 1
# 1 1 1
# torch.multinomial(input, num_samples, replacement=False, out=None) → LongTensor
# Returns a tensor where each row holds num_samples indices sampled from the multinomial distribution defined by the corresponding row of input.
# Rows of input need not sum to one (the values are used as weights), but they must be non-negative with a nonzero sum.
# This one looks fairly involved; I'll revisit it when I actually need it.
print(torch.multinomial(a,3)) #torch.LongTensor of size 3x3
# 2 0 1
# 2 0 1
# 0 2 1
print(torch.multinomial(torch.Tensor([1,2,3]),3)) #torch.LongTensor of size 3
# 2 1 0
print(torch.multinomial(torch.Tensor([1,2,3,3]),4)) #torch.LongTensor of size 4
# 3 1 2 0
print(torch.multinomial(torch.Tensor([1.0,2,3]),3)) #torch.LongTensor of size 3
# 1 2 0
print(torch.multinomial(torch.Tensor([1,2.0,3,3]),4)) #torch.LongTensor of size 4
# 3 1 2 0
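My own sketch of replacement=True, which lets num_samples exceed the number of categories:
w=torch.Tensor([1,2,3])
print(torch.multinomial(w,6,replacement=True)) #torch.LongTensor of size 6
# e.g. 2 1 2 0 2 2: indices drawn with probability proportional to the weights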
# torch.normal(means, std, out=None)
print(torch.normal(means=torch.arange(1,6),std=torch.arange(0.85,0,-0.2))) #torch.FloatTensor of size 5
# note: means and std must have the same size
# 1.2742
# 2.5393
# 3.3374
# 4.2307
# 4.9896
print(torch.normal(mean=0,std=torch.arange(0.85,0,-0.2))) #torch.FloatTensor of size 5
# note: the Tensor overload above takes the keyword means, while this scalar overload takes mean
# -0.7768
# -0.1913
# -0.3296
# 0.3402
# 0.0021
x=torch.normal(std=torch.arange(0.85,0,-0.2))
print(x) #torch.FloatTensor of size 5
print(x.mean()) #0.4534170083701611
x=torch.normal(means=torch.arange(0.85,0,-0.2))
print(x) #torch.FloatTensor of size 5
print(x.mean()) #0.5901669651269913
# torch.rand(*sizes, out=None) → Tensor: uniform distribution on [0, 1)
print(torch.rand(4)) #torch.FloatTensor of size 4
# 0.6558
# 0.2958
# 0.0541
# 0.6938
print(torch.rand(2,3)) #torch.FloatTensor of size 2x3
# 0.7529 0.6873 0.0716
# 0.9869 0.4623 0.0241
# torch.randperm(n, out=None) → LongTensor
# returns a random permutation of the integers from 0 to n-1.
print(torch.randperm(4)) #torch.LongTensor of size 4
# e.g. 2 1 0 3 (a random permutation of 0..3)
# There are also some random sampling functions defined directly on Tensors:
# torch.Tensor.bernoulli_() - in-place version of torch.bernoulli()
# torch.Tensor.cauchy_() - numbers drawn from the Cauchy distribution
# torch.Tensor.exponential_() - numbers drawn from the exponential distribution
# torch.Tensor.geometric_() - elements drawn from the geometric distribution
# torch.Tensor.log_normal_() - samples from the log-normal distribution
# torch.Tensor.normal_() - in-place version of torch.normal()
# torch.Tensor.random_() - numbers sampled from the discrete uniform distribution
# torch.Tensor.uniform_() - numbers sampled from the uniform distribution
3. Serialization, Parallelism, and Math operations
from __future__ import print_function
import torch
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
################ Serialization ##################################
#torch.save(the_model,PATH)
#torch.load('tensors.pt')
#torch.load(PATH)
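A minimal save/load sketch of my own ('params.pt' is an arbitrary filename); saving the state_dict rather than the whole model is the recommended pattern:
import torch.nn
model=torch.nn.Linear(3,2)
torch.save(model.state_dict(),'params.pt') # serialize only the parameters
model.load_state_dict(torch.load('params.pt')) # restore them into a matching model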
################ Parallelism ##################################
#torch.get_num_threads() → int
#torch.set_num_threads(int)
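A quick sketch of my own: read the current intra-op thread count and cap it.
n=torch.get_num_threads()
print(n) # machine-dependent, e.g. 4
torch.set_num_threads(max(1,n//2)) # cap CPU parallelism at half the threads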
################ Math operations ##################################
#torch.abs(input, out=None) → Tensor: element-wise absolute value
#torch.add(input, value, out=None) out=tensor+value
#torch.add(input, value=1, other, out=None) out=input+(other∗value)
#torch.mul(input, value, out=None): element-wise multiply; value may be a number or a Tensor with the same number of elements as input
a_tensor=torch.IntTensor([1,2,3]) #[torch.IntTensor of size 3]
print(torch.dot(a_tensor,a_tensor)) #14; dot requires 1-D tensors, not 2-D matrices
print(torch.mul(a_tensor,a_tensor)) #1 4 9 [torch.IntTensor of size 3]
#torch.div(input, value, out=None): like mul, element-wise division; for IntTensor the fractional part is truncated (not rounded)
#torch.ceil(input, out=None) → Tensor: rounds up; input cannot be an IntTensor, FloatTensor works
#torch.erfinv(tensor, out=None) → Tensor: inverse error function; given y in [-1, 1], returns the x in [-inf, inf] with erf(x) = y
#torch.fmod(input, divisor, out=None) → Tensor: element-wise remainder, like %; close to torch.remainder below (see the sketch after this block); // is not supported
#torch.frac(tensor, out=None) → Tensor: fractional part of each element
#torch.exp(tensor, out=None) → Tensor: exponential of each element
#torch.log(input, out=None) → Tensor: natural log of each element
#torch.log1p(input, out=None) → Tensor: y_i = log(x_i + 1)
#torch.neg(input, out=None) → Tensor: out = -1 * input; a prefix minus also works, e.g. -a_tensor
#torch.pow(input, exponent, out=None): out_i = x_i ** exponent, or out_i = x_i ** exponent_i; e.g. torch.pow(torch.Tensor([1,3]),torch.Tensor([1,2])) gives 1 9; numpy's equivalent is np.power()
#torch.pow(base, input, out=None): out_i = base ** input_i, with base (float) and input (Tensor); e.g. torch.pow(2,torch.Tensor([1,2])) gives 2 4
#torch.reciprocal(input, out=None) → Tensor: element-wise 1.0/x; 1/input also works
#torch.remainder(input, divisor, out=None) → Tensor: element-wise remainder; input (Tensor) is the dividend, divisor (Tensor or float)
#torch.round(input, out=None) → Tensor: rounds to the nearest integer
#torch.sqrt(input, out=None) → Tensor: element-wise square root
#torch.rsqrt(input, out=None) → Tensor: reciprocal of the square root; negative inputs give nan
#torch.sigmoid(input, out=None) → Tensor: element-wise sigmoid, S-shaped, in (0, 1)
#torch.trunc(input, out=None) → Tensor: truncates each element to its integer part
x=torch.Tensor([-1,-2,3])
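My own sketch of where fmod and remainder differ: they agree for positive operands, but fmod takes the sign of the dividend (like C fmod) while remainder takes the sign of the divisor (like Python %).
a=torch.Tensor([-3.0,3.0])
print(torch.fmod(a,2)) # -1 1: sign follows the dividend
print(torch.remainder(a,2)) # 1 1: sign follows the divisor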
t=torch.ones(3,2)
t1=torch.ones(1,6)
t2=torch.ones(6,1)
#torch.add(input, value, out=None) out=tensor+value
print(torch.add(x,20)) # adds 20 to every element
# 19
# 18
# 23
# torch.add(input, value=1, other, out=None): out = input + (other * value)
#input (Tensor) – the first input Tensor
# value (Number) – the scalar multiplier for other
# other (Tensor) – the second input Tensor
print(torch.add(x,1,x)) # x + 1*x; the middle argument is the scalar multiplier for the second Tensor
# -2
# -4
# 6
# torch.mul(input, value, out=None): value may be a number or a Tensor with the same number of elements
print(torch.mul(t1,10)) #torch.FloatTensor of size 1x6
print(torch.mul(t1,t2)) #torch.FloatTensor of size 6x6
print(torch.mul(t2,t1)) #torch.FloatTensor of size 6x6
t2=torch.ones(7,1)
print(torch.mul(t1,t2)) #torch.FloatTensor of size 7x6
t2=torch.ones(7,2) # so shapes cannot be multiplied arbitrarily
#print(torch.mul(t1,t2)) #RuntimeError: inconsistent tensor size
t1=torch.Tensor([1,2,3,4])
t2=torch.Tensor([[0,1],[0,1]])
# the following also raises a warning
# print(torch.mul(t1,t2)) #torch.FloatTensor of size 4
# print(torch.mul(t2,t1)) #torch.FloatTensor of size 2x2
# torch.div(input, value, out=None)
# out = tensor / value, or out_i = input_i / other_i
print(torch.div(t1,2)) #torch.FloatTensor of size 4
# 0.5000
# 1.0000
# 1.5000
# 2.0000
# the division below also raises UserWarning: self and other not broadcastable
#print(torch.div(t1,t2)) #torch.FloatTensor of size 4
# inf
# 2
# inf
# 4
#print(torch.div(t2,t1)) #[torch.FloatTensor of size 2x2]
# 0.0000 0.5000
# 0.0000 0.2500
# torch.addcdiv(tensor, value=1, tensor1, tensor2, out=None) → Tensor
# tensor (Tensor) – the tensor to be added
# value (Number, optional) – multiplier for tensor1 ./ tensor2
# tensor1 (Tensor) – Numerator tensor
# tensor2 (Tensor) – Denominator tensor
t=torch.ones(3,2)
t1=torch.ones(1,6)
t2=torch.ones(6,1)
# addcdiv adds value * (tensor1 / tensor2) to the tensor; addcmul adds value * (tensor1 * tensor2); both warn here:
# UserWarning: self, tensor1, and tensor2 not broadcastable, but have the same number of elements
#print(torch.addcdiv(t,0.1,t1,t1)) #torch.FloatTensor of size 3x2
# 1.1000 1.1000
# 1.1000 1.1000
# 1.1000 1.1000
#print(torch.addcmul(t,0.1,t1,t1)) #torch.FloatTensor of size 3x2
# 1.1000 1.1000
# 1.1000 1.1000
# 1.1000 1.1000
# torch.lerp(start, end, weight, out=None)
# out_i = start_i + weight * (end_i - start_i)
t1=torch.Tensor([0.1,0.9,1.6,-2.4]) # reassigned; these are the values printed below
t2=torch.Tensor([[0,1],[0,1]])
print(t1,t2)
print(torch.lerp(t1,t2,0.5))
# 0.1000
# 0.9000
# 1.6000
# -2.4000
# [torch.FloatTensor of size 4]
# 0 1
# 0 1
# [torch.FloatTensor of size 2x2]
#
# 0.0500
# 0.9500
# 0.8000
# -0.7000
# [torch.FloatTensor of size 4]
# torch.floor(input, out=None) → Tensor
# the counterpart of ceil: rounds down
# torch.clamp(input, min, max, out=None) → Tensor
# | min, if x_i < min
# y_i = | x_i, if min <= x_i <= max
# | max, if x_i > max
print(torch.clamp(t1,0,1)) #torch.FloatTensor of size 4
# 0.1000
# 0.9000
# 1.0000
# 0.0000
# at least one of min or max must be given
print(torch.clamp(t1,min=0)) #torch.FloatTensor of size 4
# 0.1000
# 0.9000
# 1.6000
# 0.0000
print(torch.clamp(t1,max=1)) #torch.FloatTensor of size 4
# 0.1000
# 0.9000
# 1.0000
# -2.4000
print(torch.clamp(torch.randn(5,5),min=0,max=1)) #torch.FloatTensor of size 5x5
# 0.9985 0.4794 0.0000 0.1223 0.0000
# 0.0000 0.0000 0.0000 0.0000 0.1613
# 0.0527 0.1433 0.6362 0.0000 0.0000
# 0.4906 0.0000 0.0000 0.9332 0.0000
# 0.0000 0.0000 1.0000 0.3525 0.9937
# torch.erf(tensor, out=None) → Tensor
# computes the error function of each element; S-shaped, between -1 and 1
print(torch.erf(torch.Tensor([-100,-10,-2,-1,0,0.5,1,2,10,100]))) #torch.FloatTensor of size 10
# -1.0000
# -1.0000
# -0.9953
# -0.8427
# 0.0000
# 0.5205
# 0.8427
# 0.9953
# 1.0000
# 1.0000
4. Math operations
import torch
import numpy as np
################################### Reduction operation ##################################
# Note: dim=0 processes down the columns, dim=1 across the rows; in Python data work the column direction is the handier one, and 0 comes before 1
#torch.sum(input) → float: a single value, the sum of all elements (see the sketch after this list)
#torch.sum(input, dim, keepdim=False, out=None) → Tensor: sums over one dimension, yielding a row or a column
#torch.cumprod(input, dim, out=None) → Tensor: cumulative product
#torch.cumsum(input, dim, out=None) → Tensor: cumulative sum
#torch.dist(input, other, p=2) → float: p-norm of (input - other); input (Tensor), other (Tensor), p (float, optional)
#torch.mean(input) → float: mean of all elements, a single value
#torch.mean(input, dim, keepdim=False, out=None) → Tensor
#torch.median(input) → float
#torch.median(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
# median, like mean, has a whole-tensor form and a per-dimension form
#torch.mode(input, dim=-1, keepdim=False, values=None, indices=None) -> (Tensor, LongTensor)
# returns a tuple: the most frequent value along each row or column, and its position
#torch.norm(input, p=2) → float: the p-norm; not to be confused with torch.normal()
#torch.norm(input, p, dim, keepdim=False, out=None) → Tensor: per-dimension version of the above
#torch.prod(input) → float: a single value, the product of all elements
#torch.prod(input, dim, keepdim=False, out=None) → Tensor
#torch.std(input, unbiased=True) → float: unbiased standard deviation
#torch.std(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
#torch.var(input, unbiased=True) → float
#torch.var(input, dim, keepdim=False, unbiased=True, out=None) → Tensor
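My own sketch of the whole-tensor versus per-dimension forms mentioned above:
r=torch.Tensor([[1,2],[3,4]])
print(torch.sum(r)) #10.0: one value for the whole tensor
print(torch.sum(r,0)) # 4 6: column sums (dim=0)
print(torch.cumsum(r,1)) #torch.FloatTensor of size 2x2: running sums along each row
# 1 3
# 3 7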
################################### Comparison operation ##################################
#torch.eq(input, other, out=None) → Tensor: element-wise equality test; other may be a Tensor or a float; returns a 0/1 Tensor
#torch.equal(tensor1, tensor2) → bool: True or False
#torch.ge(input, other, out=None) → Tensor: like eq, tests >= other, returns a 0/1 Tensor
#torch.gt(input, other, out=None) → Tensor: tests > other, returns a 0/1 Tensor
#torch.le(input, other, out=None) → Tensor: tests <= other, returns a 0/1 Tensor
#torch.lt(input, other, out=None) → Tensor: tests < other, returns a 0/1 Tensor
#torch.max(input) → float
#torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
#torch.max(input, other, out=None) → Tensor: element-wise maximum of two Tensors (all three forms are sketched after the sort example below)
#torch.min(input) → float
#torch.min(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor)
#torch.min(input, other, out=None) → Tensor: element-wise minimum of two Tensors
#torch.ne(input, other, out=None) → Tensor: not-equal test; other may be a Tensor or a float
#torch.sort(input, dim=None, descending=False, out=None) -> (Tensor, LongTensor): returns a tuple
x=torch.randn(3,3)
sorted,indices=torch.sort(x)
print(sorted,indices)
# -1.7012 0.2619 0.3892
# -1.8940 -0.7567 1.2057
# -0.8224 0.7787 1.3752
# [torch.FloatTensor of size 3x3]
# 0 2 1
# 2 1 0
# 2 1 0
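My own sketch putting the three torch.max overloads side by side:
u=torch.Tensor([[1,5],[4,2]])
print(torch.max(u)) #5.0: the single largest element
vals,idx=torch.max(u,1) # per-row maximum and its column index
print(vals) # 5 4
print(idx) # 1 0
print(torch.max(u,torch.Tensor([[3,3],[3,3]]))) # element-wise maximum of two Tensors
# 3 5
# 4 3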
# [torch.LongTensor of size 3x3]
# torch.kthvalue(input, k, dim=None, keepdim=False, out=None) -> (Tensor, LongTensor)
# returns the k-th smallest element and its index
#torch.topk(input, k, dim=None, largest=True, sorted=True, out=None) -> (Tensor, LongTensor)
# returns the k largest elements (note: the top k); with largest=False, the k smallest
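My own sketch contrasting kthvalue and topk:
v=torch.Tensor([1,5,4,2])
vals,idx=torch.kthvalue(v,2)
print(vals,idx) # 2 at index 3: the 2nd smallest element
vals,idx=torch.topk(v,2)
print(vals,idx) # 5 4 at indices 1 2: the two largest elements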
################################### Other operations ##################################
#torch.cross(input, other, dim=-1, out=None) → Tensor: cross product (vector product), as opposed to element-wise mul
x=torch.Tensor([1,2,3])
y=torch.Tensor([2,5,1])
print(torch.cross(x,y)) #https://baike.baidu.com/item/%E5%90%91%E9%87%8F%E7%A7%AF/4601007?fr=aladdin
# -13  (2*1 - 3*5)
# 5    (3*2 - 1*1)
# 1    (1*5 - 2*2)
# [torch.FloatTensor of size 3]
#torch.trace(input) → float: sum of the main diagonal
#torch.diag(input, diagonal=0, out=None) → Tensor: 1) 1-D input gives a 2-D Tensor with the vector on a diagonal; 2) 2-D input gives a 1-D Tensor holding a diagonal; the diagonal argument selects which one
a=torch.randn(3)
print(a)
# 2.1126
# -1.4150
# 0.4451
# [torch.FloatTensor of size 3]
print(torch.diag(a))
# 2.1126 0.0000 0.0000
# 0.0000 -1.4150 0.0000
# 0.0000 0.0000 0.4451
# [torch.FloatTensor of size 3x3]
a=torch.randn(3,3)
print(a)
# 0.6810 -2.1620 -0.3158
# 0.0545 1.1060 1.3524
# 0.1481 -1.1468 -0.0113
# [torch.FloatTensor of size 3x3]
print(torch.diag(a,0))
# 0.6810
# 1.1060
# -0.0113
# [torch.FloatTensor of size 3]
print(torch.diag(a,1))
# -2.1620
# 1.3524
# [torch.FloatTensor of size 2]
#torch.tril(input, diagonal=0, out=None) → Tensor: keeps the lower triangular part of the matrix, zeroing the rest
#torch.triu(input, diagonal=0, out=None) → Tensor: like tril, but keeps the upper triangular part
#torch.inverse(input, out=None) → Tensor: matrix inverse
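My own sketch of tril/triu/inverse on a 2x2 matrix:
e=torch.Tensor([[1,2],[3,4]])
print(torch.tril(e)) # lower triangle kept, rest zeroed
# 1 0
# 3 4
print(torch.triu(e)) # upper triangle kept
# 1 2
# 0 4
print(torch.inverse(e)) # det = -2, so the inverse is [[-2,1],[1.5,-0.5]]
# -2.0000 1.0000
# 1.5000 -0.5000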
#torch.mm(mat1, mat2, out=None) → Tensor: matrix product of two matrices
data=[[1,2],[3,4]]
tensor=torch.FloatTensor(data)
print(tensor)
# 1 2
# 3 4
# [torch.FloatTensor of size 2x2]
print(np.matmul(data,data))
# [[ 7 10]
#  [15 22]]
print(torch.mm(tensor,tensor))
# 7 10
# 15 22
# [torch.FloatTensor of size 2x2]
#torch.mv(mat, vec, out=None) → Tensor
mat = torch.randn(2, 3) # [torch.FloatTensor of size 2x3]
vec = torch.randn(3) # [torch.FloatTensor of size 3]
torch.mv(mat, vec)
# -2.0939
# -2.2950
# [torch.FloatTensor of size 2]
#torch.qr(input, out=None) -> (Tensor, Tensor): QR decomposition
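My own minimal qr sketch (the signs in q and r depend on the LAPACK backend, so I only check the reconstruction):
q,r=torch.qr(torch.Tensor([[1.0,2],[3,4]]))
print(torch.mm(q,r)) # q is orthogonal, r upper triangular; q.mm(r) reconstructs the input (up to rounding)
# 1 2
# 3 4
# [torch.FloatTensor of size 2x2]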