Intel DAAL AI Acceleration: Neural Networks

The example below trains a small dense network (two fully connected layers followed by a softmax cross-entropy loss) with DAAL's batch neural-network API, then scores a test set and prints the per-class probabilities.
```python
# file: neural_net_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

# !  Content:
# !    Python example of neural network training and scoring
# !*****************************************************************************

## <a name="DAAL-EXAMPLE-PY-NEURAL_NET_DENSE_BATCH"></a>
## \example neural_net_dense_batch.py

import os
import sys

import numpy as np

from daal.algorithms.neural_networks import initializers
from daal.algorithms.neural_networks import layers
from daal.algorithms import optimization_solver
from daal.algorithms.neural_networks import training, prediction
from daal.data_management import NumericTable, HomogenNumericTable

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printTensors, readTensorFromCSV

# Input data set parameters
trainDatasetFile = os.path.join("..", "data", "batch", "neural_network_train.csv")
trainGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_train_ground_truth.csv")
testDatasetFile = os.path.join("..", "data", "batch", "neural_network_test.csv")
testGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_test_ground_truth.csv")

# Indices of the layers in the topology
fc1 = 0
fc2 = 1
sm1 = 2

batchSize = 10


def configureNet():
    # Create first fully-connected layer (5 outputs) and initialize its parameters
    fullyConnectedLayer1 = layers.fullyconnected.Batch(5)
    fullyConnectedLayer1.parameter.weightsInitializer = initializers.uniform.Batch(-0.001, 0.001)
    fullyConnectedLayer1.parameter.biasesInitializer = initializers.uniform.Batch(0, 0.5)

    # Create second fully-connected layer (2 outputs, one per class) and initialize its parameters
    fullyConnectedLayer2 = layers.fullyconnected.Batch(2)
    fullyConnectedLayer2.parameter.weightsInitializer = initializers.uniform.Batch(0.5, 1)
    fullyConnectedLayer2.parameter.biasesInitializer = initializers.uniform.Batch(0.5, 1)

    # Create softmax cross-entropy loss layer
    softmaxCrossEntropyLayer = layers.loss.softmax_cross.Batch()

    # Create configuration of the neural network with layers
    topology = training.Topology()

    # Add layers to the topology of the neural network and connect them in a chain
    topology.push_back(fullyConnectedLayer1)
    topology.push_back(fullyConnectedLayer2)
    topology.push_back(softmaxCrossEntropyLayer)
    topology.get(fc1).addNext(fc2)
    topology.get(fc2).addNext(sm1)

    return topology


def trainModel():
    # Read training data set from a .csv file and create tensors to store input data
    trainingData = readTensorFromCSV(trainDatasetFile)
    trainingGroundTruth = readTensorFromCSV(trainGroundTruthFile, True)

    # Create an SGD optimization solver for the neural network and set its learning rate
    sgdAlgorithm = optimization_solver.sgd.Batch(fptype=np.float32)
    learningRate = 0.001
    sgdAlgorithm.parameter.learningRateSequence = HomogenNumericTable(
        1, 1, NumericTable.doAllocate, learningRate)

    # Set the batch size and the number of iterations for the neural network training
    sgdAlgorithm.parameter.batchSize = batchSize
    sgdAlgorithm.parameter.nIterations = int(
        trainingData.getDimensionSize(0) / sgdAlgorithm.parameter.batchSize)

    # Create an algorithm to train the neural network
    net = training.Batch(sgdAlgorithm)

    sampleSize = trainingData.getDimensions()
    sampleSize[0] = batchSize

    # Configure the neural network
    topology = configureNet()
    net.initialize(sampleSize, topology)

    # Pass the training data set and dependent values to the algorithm
    net.input.setInput(training.data, trainingData)
    net.input.setInput(training.groundTruth, trainingGroundTruth)

    # Run the neural network training and retrieve the training model
    trainingModel = net.compute().get(training.model)

    # Return the prediction model derived from the training model
    return trainingModel.getPredictionModel_Float32()


def testModel(predictionModel):
    # Read testing data set from a .csv file and create a tensor to store input data
    predictionData = readTensorFromCSV(testDatasetFile)

    # Create an algorithm to compute the neural network predictions
    net = prediction.Batch()
    net.parameter.batchSize = predictionData.getDimensionSize(0)

    # Set input objects for the prediction neural network
    net.input.setModelInput(prediction.model, predictionModel)
    net.input.setTensorInput(prediction.data, predictionData)

    # Run the neural network prediction and return its results
    return net.compute()


def printResults(predictionResult):
    # Read testing ground truth from a .csv file and create a tensor to store the data
    predictionGroundTruth = readTensorFromCSV(testGroundTruthFile)

    printTensors(predictionGroundTruth, predictionResult.getResult(prediction.prediction),
                 "Ground truth", "Neural network predictions: each class probability",
                 "Neural network classification results (first 20 observations):", 20)


if __name__ == "__main__":
    predictionModel = trainModel()
    predictionResult = testModel(predictionModel)
    printResults(predictionResult)
```
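The script depends on two helpers from the examples' `utils` module, `readTensorFromCSV` and `printTensors`, which are not shown above. The sketch below is a minimal, assumed reconstruction of `readTensorFromCSV` built on the pydaal data-management API (`FileDataSource`, `BlockDescriptor`, `HomogenTensor`); the helper actually shipped with the examples may differ in details.

```python
# A minimal sketch (not the shipped utils implementation) of loading a CSV
# file into a DAAL tensor, assuming the pydaal data-management API.
import numpy as np

from daal.data_management import (
    FileDataSource, DataSourceIface, BlockDescriptor, HomogenTensor, readOnly
)


def readTensorFromCSV(datasetFileName, allowOneColumn=False):
    # Load the whole CSV file into a numeric table
    dataSource = FileDataSource(datasetFileName,
                                DataSourceIface.doAllocateNumericTable,
                                DataSourceIface.doDictionaryFromContext)
    dataSource.loadDataBlock()
    nt = dataSource.getNumericTable()

    # Copy the table's data out into a NumPy array
    nRows = int(nt.getNumberOfRows())
    block = BlockDescriptor()
    nt.getBlockOfRows(0, nRows, readOnly, block)
    data = np.array(block.getArray(), copy=True, dtype=np.float32)
    nt.releaseBlockOfRows(block)

    # One-column files become 1D tensors unless the caller asks otherwise
    dims = [nRows]
    if nt.getNumberOfColumns() > 1 or allowOneColumn:
        dims.append(int(nt.getNumberOfColumns()))

    return HomogenTensor(data.reshape(dims), ntype=np.float32)
```

The point of the helper is that DAAL's neural-network layers consume `Tensor` objects rather than `NumericTable`s, so CSV input is loaded into a table first and then repacked into a `HomogenTensor`.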
Currently supported layers (a sketch combining several of them follows this list):
- Common Parameters
- Fully Connected Forward Layer
- Fully Connected Backward Layer
- Absolute Value Forward Layer
- Absolute Value Backward Layer
- Logistic Forward Layer
- Logistic Backward Layer
- pReLU Forward Layer
- pReLU Backward Layer
- ReLU Forward Layer
- ReLU Backward Layer
- SmoothReLU Forward Layer
- SmoothReLU Backward Layer
- Hyperbolic Tangent Forward Layer
- Hyperbolic Tangent Backward Layer
- Batch Normalization Forward Layer
- Batch Normalization Backward Layer
- Local-Response Normalization Forward Layer
- Local-Response Normalization Backward Layer
- Local-Contrast Normalization Forward Layer
- Local-Contrast Normalization Backward Layer
- Dropout Forward Layer
- Dropout Backward Layer
- 1D Max Pooling Forward Layer
- 1D Max Pooling Backward Layer
- 2D Max Pooling Forward Layer
- 2D Max Pooling Backward Layer
- 3D Max Pooling Forward Layer
- 3D Max Pooling Backward Layer
- 1D Average Pooling Forward Layer
- 1D Average Pooling Backward Layer
- 2D Average Pooling Forward Layer
- 2D Average Pooling Backward Layer
- 3D Average Pooling Forward Layer
- 3D Average Pooling Backward Layer
- 2D Stochastic Pooling Forward Layer
- 2D Stochastic Pooling Backward Layer
- 2D Spatial Pyramid Pooling Forward Layer
- 2D Spatial Pyramid Pooling Backward Layer
- 2D Convolution Forward Layer
- 2D Convolution Backward Layer
- 2D Transposed Convolution Forward Layer
- 2D Transposed Convolution Backward Layer
- 2D Locally-connected Forward Layer
- 2D Locally-connected Backward Layer
- Reshape Forward Layer
- Reshape Backward Layer
- Concat Forward Layer
- Concat Backward Layer
- Split Forward Layer
- Split Backward Layer
- Softmax Forward Layer
- Softmax Backward Layer
- Loss Forward Layer
- Loss Backward Layer
- Loss Softmax Cross-entropy Forward Layer
- Loss Softmax Cross-entropy Backward Layer
- Loss Logistic Cross-entropy Forward Layer
- Loss Logistic Cross-entropy Backward Layer
- Exponential Linear Unit Forward Layer
- Exponential Linear Unit Backward Layer
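As promised above, here is a hedged sketch of how several of the listed layers combine into a deeper topology. It reuses the `push_back`/`addNext` pattern from `configureNet`; the `layers.relu.Batch()` and `layers.dropout.Batch()` constructors come from the same `daal.algorithms.neural_networks.layers` module, with their default parameters assumed.

```python
# A sketch (under the same pydaal API assumptions as the example above) of a
# deeper topology: fully connected -> ReLU -> dropout -> fully connected ->
# softmax cross-entropy loss.
from daal.algorithms.neural_networks import layers, training


def configureDeeperNet():
    topology = training.Topology()

    # push_back appends layers; indices follow insertion order (0, 1, 2, ...)
    topology.push_back(layers.fullyconnected.Batch(20))    # index 0
    topology.push_back(layers.relu.Batch())                # index 1
    topology.push_back(layers.dropout.Batch())             # index 2
    topology.push_back(layers.fullyconnected.Batch(2))     # index 3
    topology.push_back(layers.loss.softmax_cross.Batch())  # index 4

    # Connect the layers in a simple chain, as configureNet does with addNext
    for src in range(4):
        topology.get(src).addNext(src + 1)

    return topology
```

Passing this topology to `net.initialize(sampleSize, ...)` in `trainModel` would train the deeper network with the rest of the script unchanged.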