github: https://github.com/tensorboy/pytorch_Realtime_Multi-Person_Pose_Estimation

  1. # -*- coding: utf-8 -*
  2. import os
  3. import re
  4. import sys
  5. import cv2
  6. import math
  7. import time
  8. import scipy
  9. import argparse
  10. import matplotlib
  11. import numpy as np
  12. import pylab as plt
  13. from joblib import Parallel, delayed
  14. import util
  15. import torch
  16. import torch as T
  17. import torch.nn as nn
  18. import torch.nn.functional as F
  19. from torch.autograd import Variable
  20. from collections import OrderedDict
  21. from config_reader import config_reader
  22. from scipy.ndimage.filters import gaussian_filter
  23. #parser = argparse.ArgumentParser()
  24. #parser.add_argument('--t7_file', required=True)
  25. #parser.add_argument('--pth_file', required=True)
  26. #args = parser.parse_args()
  27.  
torch.set_num_threads(torch.get_num_threads())
# Weights converted from the original Caffe OpenPose release.
weight_name = './model/pose_model.pth'

blocks = {}
# Limb connections as 1-based indices into the keypoint list; see the
# "Pose Output Format" figure (center 29 is at position 15).
limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
           [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
           [1,16], [16,18], [3,17], [6,18]]

# For each limb, the pair of network output channels that hold the x/y
# components of that limb's part-affinity field (the middle joints
# heatmap correspondence).
mapIdx = [[31,32], [39,40], [33,34], [35,36], [41,42], [43,44], [19,20], [21,22], \
          [23,24], [25,26], [27,28], [29,30], [47,48], [49,50], [53,54], [51,52], \
          [55,56], [37,38], [45,46]]

# visualize: one BGR color per keypoint class (18 classes).
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
          [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
          [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]

# Heatmap branch: 19 channels (18 joints + background) of keypoint scores.
# PAF branch: 38 channels (unit vectors along each limb).
# Layer configs: convs are {name: [in_ch, out_ch, kernel, stride, pad]},
# pools are {name: [kernel, stride, pad]}.
# block0 is the shared VGG-style feature extractor (outputs 128 channels).
block0 = [{'conv1_1':[3,64,3,1,1]},{'conv1_2':[64,64,3,1,1]},{'pool1_stage1':[2,2,0]},{'conv2_1':[64,128,3,1,1]},{'conv2_2':[128,128,3,1,1]},{'pool2_stage1':[2,2,0]},{'conv3_1':[128,256,3,1,1]},{'conv3_2':[256,256,3,1,1]},{'conv3_3':[256,256,3,1,1]},{'conv3_4':[256,256,3,1,1]},{'pool3_stage1':[2,2,0]},{'conv4_1':[256,512,3,1,1]},{'conv4_2':[512,512,3,1,1]},{'conv4_3_CPM':[512,256,3,1,1]},{'conv4_4_CPM':[256,128,3,1,1]}]

# Stage-1 branches: L1 predicts the 38 PAF channels, L2 the 19 heatmaps.
blocks['block1_1'] = [{'conv5_1_CPM_L1':[128,128,3,1,1]},{'conv5_2_CPM_L1':[128,128,3,1,1]},{'conv5_3_CPM_L1':[128,128,3,1,1]},{'conv5_4_CPM_L1':[128,512,1,1,0]},{'conv5_5_CPM_L1':[512,38,1,1,0]}]

blocks['block1_2'] = [{'conv5_1_CPM_L2':[128,128,3,1,1]},{'conv5_2_CPM_L2':[128,128,3,1,1]},{'conv5_3_CPM_L2':[128,128,3,1,1]},{'conv5_4_CPM_L2':[128,512,1,1,0]},{'conv5_5_CPM_L2':[512,19,1,1,0]}]

# Refinement stages 2..6: each consumes 185 = 128 (features) + 38 (PAF)
# + 19 (heatmap) channels, i.e. the previous stage's two predictions
# concatenated with the block0 features.
for i in range(2,7):
    blocks['block%d_1'%i] = [{'Mconv1_stage%d_L1'%i:[185,128,7,1,3]},{'Mconv2_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv3_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv4_stage%d_L1'%i:[128,128,7,1,3]},
                             {'Mconv5_stage%d_L1'%i:[128,128,7,1,3]},{'Mconv6_stage%d_L1'%i:[128,128,1,1,0]},{'Mconv7_stage%d_L1'%i:[128,38,1,1,0]}]
    blocks['block%d_2'%i] = [{'Mconv1_stage%d_L2'%i:[185,128,7,1,3]},{'Mconv2_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv3_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv4_stage%d_L2'%i:[128,128,7,1,3]},
                             {'Mconv5_stage%d_L2'%i:[128,128,7,1,3]},{'Mconv6_stage%d_L2'%i:[128,128,1,1,0]},{'Mconv7_stage%d_L2'%i:[128,19,1,1,0]}]
  61.  
  62. def make_layers(cfg_dict):
  63. layers = []
  64. for i in range(len(cfg_dict)-1):
  65. one_ = cfg_dict[i]
  66. for k,v in one_.iteritems():
  67. if 'pool' in k:
  68. layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2] )]
  69. else:
  70. conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
  71. layers += [conv2d, nn.ReLU(inplace=True)]
  72. one_ = cfg_dict[-1].keys()
  73. k = one_[0]
  74. v = cfg_dict[-1][k]
  75. conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
  76. layers += [conv2d]
  77. return nn.Sequential(*layers)
  78.  
  79. layers = []
  80. for i in range(len(block0)):
  81. one_ = block0[i]
  82. for k,v in one_.iteritems():
  83. if 'pool' in k:
  84. layers += [nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2] )]
  85. else:
  86. conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride = v[3], padding=v[4])
  87. layers += [conv2d, nn.ReLU(inplace=True)]
  88.  
  89. models = {}
  90. models['block0']=nn.Sequential(*layers)
  91.  
  92. for k,v in blocks.iteritems():
  93. models[k] = make_layers(v)
  94.  
  95. class pose_model(nn.Module):
  96. def __init__(self,model_dict,transform_input=False):
  97. super(pose_model, self).__init__()
  98. self.model0 = model_dict['block0']
  99. self.model1_1 = model_dict['block1_1']
  100. self.model2_1 = model_dict['block2_1']
  101. self.model3_1 = model_dict['block3_1']
  102. self.model4_1 = model_dict['block4_1']
  103. self.model5_1 = model_dict['block5_1']
  104. self.model6_1 = model_dict['block6_1']
  105.  
  106. self.model1_2 = model_dict['block1_2']
  107. self.model2_2 = model_dict['block2_2']
  108. self.model3_2 = model_dict['block3_2']
  109. self.model4_2 = model_dict['block4_2']
  110. self.model5_2 = model_dict['block5_2']
  111. self.model6_2 = model_dict['block6_2']
  112.  
  113. def forward(self, x):
  114. out1 = self.model0(x)
  115.  
  116. out1_1 = self.model1_1(out1)
  117. out1_2 = self.model1_2(out1)
  118. out2 = torch.cat([out1_1,out1_2,out1],1)
  119.  
  120. out2_1 = self.model2_1(out2)
  121. out2_2 = self.model2_2(out2)
  122. out3 = torch.cat([out2_1,out2_2,out1],1)
  123.  
  124. out3_1 = self.model3_1(out3)
  125. out3_2 = self.model3_2(out3)
  126. out4 = torch.cat([out3_1,out3_2,out1],1)
  127.  
  128. out4_1 = self.model4_1(out4)
  129. out4_2 = self.model4_2(out4)
  130. out5 = torch.cat([out4_1,out4_2,out1],1)
  131.  
  132. out5_1 = self.model5_1(out5)
  133. out5_2 = self.model5_2(out5)
  134. out6 = torch.cat([out5_1,out5_2,out1],1)
  135.  
  136. out6_1 = self.model6_1(out6)
  137. out6_2 = self.model6_2(out6)
  138.  
  139. return out6_1,out6_2
  140.  
# Assemble the full network, load the pretrained weights, and switch to
# GPU inference mode.
model = pose_model(models)
model.load_state_dict(torch.load(weight_name))
model.cuda()
model.float()
model.eval()

# param_: runtime settings (scale_search, thre1, thre2, ...);
# model_: architecture settings (boxsize, stride, padValue, ...).
param_, model_ = config_reader()

#torch.nn.functional.pad(img pad, mode='constant', value=model_['padValue'])
tic = time.time()
test_image = './sample_image/ski.jpg'
#test_image = 'a.jpg'
oriImg = cv2.imread(test_image) # B,G,R order
# HWC uint8 image -> NCHW float tensor on the GPU (volatile: inference
# only, no autograd graph).  Unused afterwards: the loop below rebuilds
# its own input per scale.
imageToTest = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(),0),2,3),1,2),volatile=True).cuda()

# Multi-scale search: resize factors that make the image height equal
# boxsize times each entry of scale_search.
multiplier = [x * model_['boxsize'] / oriImg.shape[0] for x in param_['scale_search']]

# Per-scale buffers at original resolution: 19 heatmap channels
# (18 joints + background) and 38 PAF channels; averaged over scales later.
heatmap_avg = torch.zeros((len(multiplier),19,oriImg.shape[0], oriImg.shape[1])).cuda()
paf_avg = torch.zeros((len(multiplier),38,oriImg.shape[0], oriImg.shape[1])).cuda()
#print heatmap_avg.size()

toc =time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()
for m in range(len(multiplier)):
    scale = multiplier[m]
    h = int(oriImg.shape[0]*scale)
    w = int(oriImg.shape[1]*scale)
    # Padding needed to round the scaled size up to a stride multiple.
    # (new_h/new_w are informational; the actual padding is done by
    # util.padRightDownCorner below.)
    pad_h = 0 if (h%model_['stride']==0) else model_['stride'] - (h % model_['stride'])
    pad_w = 0 if (w%model_['stride']==0) else model_['stride'] - (w % model_['stride'])
    new_h = h+pad_h
    new_w = w+pad_w

    imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
    imageToTest_padded, pad = util.padRightDownCorner(imageToTest, model_['stride'], model_['padValue'])
    # HWC -> NCHW and map pixel values into (-0.5, 0.5).
    imageToTest_padded = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5
    feed = Variable(T.from_numpy(imageToTest_padded)).cuda()
    # output1: PAFs (38 ch), output2: heatmaps (19 ch) — stage-6 outputs.
    output1,output2 = model(feed)
    print output1.size()
    print output2.size()
    # Upsample both network outputs back to the original image size.
    heatmap = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output2)

    paf = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output1)

    heatmap_avg[m] = heatmap[0].data
    paf_avg[m] = paf[0].data

toc =time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()
# Average the heatmaps and PAFs over all scales, then convert to HWC
# numpy arrays for the CPU post-processing below.
heatmap_avg = T.transpose(T.transpose(T.squeeze(T.mean(heatmap_avg, 0)),0,1),1,2).cuda()
paf_avg = T.transpose(T.transpose(T.squeeze(T.mean(paf_avg, 0)),0,1),1,2).cuda()
heatmap_avg=heatmap_avg.cpu().numpy()
paf_avg = paf_avg.cpu().numpy()
toc =time.time()
print 'time is %.5f'%(toc-tic)
tic = time.time()
  200.  
all_peaks = []     # per joint class: list of (x, y, score, global_id) peaks
peak_counter = 0   # running id across all peaks of all joint classes

#maps =
# picture array is reversed
for part in range(18): # one averaged heatmap per joint class
    map_ori = heatmap_avg[:,:,part]
    # Smooth before non-maximum suppression.  NOTE: `map` shadows the
    # builtin; kept as-is for byte-compatibility.
    map = gaussian_filter(map_ori, sigma=3)

    # 4-neighbour shifted copies for the local-maximum test.
    map_left = np.zeros(map.shape)
    map_left[1:,:] = map[:-1,:]
    map_right = np.zeros(map.shape)
    map_right[:-1,:] = map[1:,:]
    map_up = np.zeros(map.shape)
    map_up[:,1:] = map[:,:-1]
    map_down = np.zeros(map.shape)
    map_down[:,:-1] = map[:,1:]

    # A peak is a local maximum whose smoothed score exceeds thre1.
    peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > param_['thre1']))
    # peaks_binary = T.eq(
    # peaks = zip(T.nonzero(peaks_binary)[0],T.nonzero(peaks_binary)[0])

    # note reverse: nonzero returns (rows, cols) = (y, x); swapping the
    # two index arrays yields (x, y) pairs.
    peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])

    # Score is read from the *unsmoothed* heatmap at the peak location.
    peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
    id = range(peak_counter, peak_counter + len(peaks))
    peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

    # Peaks of different people for this joint: [(x, y, score, id), ...]
    all_peaks.append(peaks_with_score_and_id)
    peak_counter += len(peaks)
  232.  
# Score each candidate limb by a line integral of the PAF along the
# segment between its two endpoint peaks, sampled at mid_num points.
connection_all = []   # per limb type: accepted [A_id, B_id, score, i, j] rows
special_k = []        # limb types with no candidate at one endpoint
mid_num = 10

for k in range(len(mapIdx)):
    # 2-channel slice of the averaged PAF for limb k (x/y components).
    score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]
    candA = all_peaks[limbSeq[k][0]-1]  # candidate peaks (different people) for limb k's first joint
    candB = all_peaks[limbSeq[k][1]-1]  # candidate peaks for limb k's second joint
    nA = len(candA)
    nB = len(candB)
    # indexA, indexB = limbSeq[k]
    if(nA != 0 and nB != 0): # both endpoints have candidates: score all pairs
        connection_candidate = []
        for i in range(nA):
            for j in range(nB):
                vec = np.subtract(candB[j][:2], candA[i][:2])
                norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1])
                vec = np.divide(vec, norm)  # unit vector from A[i] to B[j]

                # Sample mid_num points on the segment A[i] -> B[j].
                startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
                               np.linspace(candA[i][1], candB[j][1], num=mid_num))

                # PAF vectors at the sample points (indexing is [y, x, ch]).
                vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
                                  for I in range(len(startend))])
                vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
                                  for I in range(len(startend))])

                # Dot product of sampled PAF vectors with the limb direction
                # (cosine-style alignment score), plus a distance prior that
                # penalizes limbs longer than half the image height.
                score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)
                # criterion1: at least 80% of the samples exceed thre2.
                criterion1 = len(np.nonzero(score_midpts > param_['thre2'])[0]) > 0.8 * len(score_midpts)
                criterion2 = score_with_dist_prior > 0
                if criterion1 and criterion2:
                    # (i, j, score, score + both endpoint scores)
                    connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])

        # Greedy assignment: best-scoring pairs first, each endpoint used once.
        connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
        connection = np.zeros((0,5))
        for c in range(len(connection_candidate)):
            i,j,s = connection_candidate[c][0:3]
            if(i not in connection[:,3] and j not in connection[:,4]):
                connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) # A_id, B_id, score, i, j
                if(len(connection) >= min(nA, nB)):
                    break

        # Accepted connections for this limb type: [[A_id, B_id, score, i, j], ...]
        connection_all.append(connection)
    else:
        special_k.append(k)
        connection_all.append([])
  284.  
'''
Assemble the accepted limb connections into per-person skeletons.
subset: one row per person, 20 columns — 18 joint slots holding peak
        ids (-1 if missing), then the overall configuration score
        (second-to-last) and the total number of parts (last).
candidate: flat array of all candidate keypoints, one row per peak id.
connection_all: accepted limb connections per limb type.

'''
subset = -1 * np.ones((0, 20))
# One row per peak id: (x, y, score, id).
candidate = np.array([item for sublist in all_peaks for item in sublist])

for k in range(len(mapIdx)):
    if k not in special_k:
        partAs = connection_all[k][:,0] # peak ids of limb k's first endpoint, per connection
        partBs = connection_all[k][:,1] # peak ids of limb k's second endpoint, per connection
        indexA, indexB = np.array(limbSeq[k]) - 1 # 0-based joint (column) indices

        for i in range(len(connection_all[k])): #= 1:size(temp,1)
            found = 0
            subset_idx = [-1, -1]
            # How many existing people already contain either endpoint?
            for j in range(len(subset)): #1:size(subset,1)
                if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                    subset_idx[found] = j
                    found += 1

            if found == 1: # attach this limb to the one matching person
                j = subset_idx[0]
                if(subset[j][indexB] != partBs[i]):
                    subset[j][indexB] = partBs[i]
                    subset[j][-1] += 1
                    subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                elif(subset[j][indexA] != partAs[i]):
                    subset[j][indexA] = partAs[i]
                    subset[j][-1] += 1
                    subset[j][-2] += candidate[partAs[i].astype(int), 2] + connection_all[k][i][2]

            elif found == 2: # if found 2 and disjoint, merge them
                j1, j2 = subset_idx
                print "found = 2"
                membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
                if len(np.nonzero(membership == 2)[0]) == 0:
                    # The two partial people share no joint slot: merge them
                    # into one person (the +1 cancels j2's -1 placeholders).
                    subset[j1][:-2] += (subset[j2][:-2] + 1)
                    subset[j1][-2:] += subset[j2][-2:]
                    subset[j1][-2] += connection_all[k][i][2]
                    subset = np.delete(subset, j2, 0)
                # TODO(review): the overlapping case is unhandled — original
                # author flagged this merge as questionable.
                # else: # as like found == 1
                #     subset[j1][indexB] = partBs[i]
                #     subset[j1][-1] += 1
                #     subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

            # if find no partA in the subset, create a new subset
            elif not found and k < 17:
                row = -1 * np.ones(20)
                row[indexA] = partAs[i]
                row[indexB] = partBs[i]
                row[-1] = 2   # two parts so far
                row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
                subset = np.vstack([subset, row])
  345.  
# Delete people with fewer than 4 detected parts or a low average
# per-part score.
deleteIdx = [];
for i in range(len(subset)):
    if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
        deleteIdx.append(i)
subset = np.delete(subset, deleteIdx, axis=0)

# Draw every detected keypoint on a fresh copy of the input image.
canvas = cv2.imread(test_image) # B,G,R order
for i in range(18):
    for j in range(len(all_peaks[i])):
        cv2.circle(canvas, all_peaks[i][j][0:2], 4, colors[i], thickness=-1)

stickwidth = 4

# Draw each person's limbs as alpha-blended ellipses between joint pairs.
for i in range(17):
    for n in range(len(subset)):
        index = subset[n][np.array(limbSeq[i])-1] # the limb's two peak ids
        if -1 in index:
            continue  # this person is missing one endpoint of the limb
        cur_canvas = canvas.copy()
        # NOTE(review): candidate column 0 holds x and column 1 holds y
        # (see peak construction), so Y/X names look swapped — but they
        # are used consistently below, so the drawing is correct.
        Y = candidate[index.astype(int), 0]
        X = candidate[index.astype(int), 1]
        mX = np.mean(X)
        mY = np.mean(Y)
        length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
        angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
        polygon = cv2.ellipse2Poly((int(mY),int(mX)), (int(length/2), stickwidth), int(angle), 0, 360, 1)
        cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
        canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)

#Parallel(n_jobs=1)(delayed(handle_one)(i) for i in range(18))

toc =time.time()
print 'time is %.5f'%(toc-tic)
cv2.imwrite('result.png',canvas)

openpose pytorch代码分析的更多相关文章

  1. (原)SphereFace及其pytorch代码

    转载请注明出处: http://www.cnblogs.com/darkknightzh/p/8524937.html 论文: SphereFace: Deep Hypersphere Embeddi ...

  2. 目标检测之Faster-RCNN的pytorch代码详解(数据预处理篇)

    首先贴上代码原作者的github:https://github.com/chenyuntc/simple-faster-rcnn-pytorch(非代码作者,博文只解释代码) 今天看完了simple- ...

  3. 残差网络resnet理解与pytorch代码实现

    写在前面 ​ 深度残差网络(Deep residual network, ResNet)自提出起,一次次刷新CNN模型在ImageNet中的成绩,解决了CNN模型难训练的问题.何凯明大神的工作令人佩服 ...

  4. Android代码分析工具lint学习

    1 lint简介 1.1 概述 lint是随Android SDK自带的一个静态代码分析工具.它用来对Android工程的源文件进行检查,找出在正确性.安全.性能.可使用性.可访问性及国际化等方面可能 ...

  5. pmd静态代码分析

    在正式进入测试之前,进行一定的静态代码分析及code review对代码质量及系统提高是有帮助的,以上为数据证明 Pmd 它是一个基于静态规则集的Java源码分析器,它可以识别出潜在的如下问题:– 可 ...

  6. [Asp.net 5] DependencyInjection项目代码分析-目录

    微软DI文章系列如下所示: [Asp.net 5] DependencyInjection项目代码分析 [Asp.net 5] DependencyInjection项目代码分析2-Autofac [ ...

  7. [Asp.net 5] DependencyInjection项目代码分析4-微软的实现(5)(IEnumerable<>补充)

    Asp.net 5的依赖注入注入系列可以参考链接: [Asp.net 5] DependencyInjection项目代码分析-目录 我们在之前讲微软的实现时,对于OpenIEnumerableSer ...

  8. 完整全面的Java资源库(包括构建、操作、代码分析、编译器、数据库、社区等等)

    构建 这里搜集了用来构建应用程序的工具. Apache Maven:Maven使用声明进行构建并进行依赖管理,偏向于使用约定而不是配置进行构建.Maven优于Apache Ant.后者采用了一种过程化 ...

  9. STM32启动代码分析 IAR 比较好

    stm32启动代码分析 (2012-06-12 09:43:31) 转载▼     最近开始使用ST的stm32w108芯片(也是一款zigbee芯片).开始看他的启动代码看的晕晕呼呼呼的. 还好在c ...

随机推荐

  1. 设计模式C++学习笔记之十二(Command命令模式)

      命令模式,将一个请求封装为一个对象,从而使你可用不同的请求对客户进行参数化:对请求排队或记录请求日志,以及支持可撤消的操作.应该是一个比较简单的模式了. 12.1.解释 main(),客户 CIn ...

  2. 计算机中内存、cache和寄存器之间的关系及区别

    1. 寄存器是中央处理器内的组成部份.寄存器是有限存贮容量的高速存贮部件,它们可用来暂存指令.数据和位址.在中央处理器的控制部件中,包含的寄存 器有指令寄存器(IR)和程序计数器(PC).在中央处理器 ...

  3. 题解-bzoj4221 JOI2012kangaroo

    Problem bzoj 题意:给定\(n\)只袋鼠,每只袋鼠有俩属性\(a,b\),若\(a_i\leq b_j\),则\(i\)是可以被\(j\)放置在袋子里的,求经过一系列放置操作后无法进行操作 ...

  4. hibernate框架学习第四天:关联关系、外键、级联等

    一对多关联关系表 一方 多方(外键)实体类 一方:TeacherModel 添加多方的集合Set 多方StudentModel 添加一方的对象一方配置关系 name:一方模型中描述多方的集合对象名 c ...

  5. exiting pxe rom 无法启动

    背景 我这是给人装了多少次机器了,上千次不敢说,几百次是有了.有个奇怪现象,为什么每次总有新的问题呢,极少能一次成功的.除了让我涨了见识,没想到其他的用处.程序员修电脑,搞笑吧,还有找我修洗衣机的,说 ...

  6. AviSynth AVS Importer Plugin for Adobe Premiere Pro CC 2015 x64

    Premiere CS AVS Importer x64.prm copy to Adobe\Adobe Premiere Pro CC 2015\Plug-Ins\Common\ VSFilterM ...

  7. 微信小程序-WebSocket应用

    为何有 HTTP 协议还需要 WebSocket ? Http协议 有个缺陷:通信只能由客户端发起.举例来说,我们想了解今天的天气,只能是客户端向服务器发出请求,服务器返回查询结果.HTTP 协议做不 ...

  8. $Django 站点:样式--文章--分类文章--文章详情--文章评论点赞--文章评论点赞统计(数据库优化)

    <h3>个人站点下的</h3> 知识点 url (r'(?P<username>\w+)/p/(?P<id>\d+)', xiangxi,name='x ...

  9. ansible结合zabbix_api批量添加主机

    批量添加zabbix监控 .使用ansible配置zabbix客户端 ①修改服务器的IP(域名),为了方便使用ansible来批量操作 等同于如下sed语句 sed -i 's#Server=1.1. ...

  10. 51nod--1265 四点共面 (计算几何基础, 点积, 叉积)

    题目: 1265 四点共面 基准时间限制:1 秒 空间限制:131072 KB 分值: 0 难度:基础题 收藏 关注 给出三维空间上的四个点(点与点的位置均不相同),判断这4个点是否在同一个平面内(4 ...