# file: neural_net_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

#
# ! Content:
# ! Python example of neural network training and scoring
# !*****************************************************************************

#
## <a name="DAAL-EXAMPLE-PY-NEURAL_NET_DENSE_BATCH"></a>
## \example neural_net_dense_batch.py
#
  26. import os
  27. import sys
  28.  
  29. import numpy as np
  30.  
  31. from daal.algorithms.neural_networks import initializers
  32. from daal.algorithms.neural_networks import layers
  33. from daal.algorithms import optimization_solver
  34. from daal.algorithms.neural_networks import training, prediction
  35. from daal.data_management import NumericTable, HomogenNumericTable
  36.  
  37. utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
  38. if utils_folder not in sys.path:
  39. sys.path.insert(0, utils_folder)
  40. from utils import printTensors, readTensorFromCSV
  41.  
  42. # Input data set parameters
  43. trainDatasetFile = os.path.join("..", "data", "batch", "neural_network_train.csv")
  44. trainGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_train_ground_truth.csv")
  45. testDatasetFile = os.path.join("..", "data", "batch", "neural_network_test.csv")
  46. testGroundTruthFile = os.path.join("..", "data", "batch", "neural_network_test_ground_truth.csv")
  47.  
  48. fc1 = 0
  49. fc2 = 1
  50. sm1 = 2
  51.  
  52. batchSize = 10
  53.  
  54. def configureNet():
  55. # Create layers of the neural network
  56. # Create fully-connected layer and initialize layer parameters
  57. fullyConnectedLayer1 = layers.fullyconnected.Batch(5)
  58. fullyConnectedLayer1.parameter.weightsInitializer = initializers.uniform.Batch(-0.001, 0.001)
  59. fullyConnectedLayer1.parameter.biasesInitializer = initializers.uniform.Batch(0, 0.5)
  60.  
  61. # Create fully-connected layer and initialize layer parameters
  62. fullyConnectedLayer2 = layers.fullyconnected.Batch(2)
  63. fullyConnectedLayer2.parameter.weightsInitializer = initializers.uniform.Batch(0.5, 1)
  64. fullyConnectedLayer2.parameter.biasesInitializer = initializers.uniform.Batch(0.5, 1)
  65.  
  66. # Create softmax layer and initialize layer parameters
  67. softmaxCrossEntropyLayer = layers.loss.softmax_cross.Batch()
  68.  
  69. # Create configuration of the neural network with layers
  70. topology = training.Topology()
  71.  
  72. # Add layers to the topology of the neural network
  73. topology.push_back(fullyConnectedLayer1)
  74. topology.push_back(fullyConnectedLayer2)
  75. topology.push_back(softmaxCrossEntropyLayer)
  76. topology.get(fc1).addNext(fc2)
  77. topology.get(fc2).addNext(sm1)
  78. return topology
  79.  
  80. def trainModel():
  81. # Read training data set from a .csv file and create a tensor to store input data
  82. trainingData = readTensorFromCSV(trainDatasetFile)
  83. trainingGroundTruth = readTensorFromCSV(trainGroundTruthFile, True)
  84.  
  85. sgdAlgorithm = optimization_solver.sgd.Batch(fptype=np.float32)
  86.  
  87. # Set learning rate for the optimization solver used in the neural network
  88. learningRate = 0.001
  89. sgdAlgorithm.parameter.learningRateSequence = HomogenNumericTable(1, 1, NumericTable.doAllocate, learningRate)
  90. # Set the batch size for the neural network training
  91. sgdAlgorithm.parameter.batchSize = batchSize
  92. sgdAlgorithm.parameter.nIterations = int(trainingData.getDimensionSize(0) / sgdAlgorithm.parameter.batchSize)
  93.  
  94. # Create an algorithm to train neural network
  95. net = training.Batch(sgdAlgorithm)
  96.  
  97. sampleSize = trainingData.getDimensions()
  98. sampleSize[0] = batchSize
  99.  
  100. # Configure the neural network
  101. topology = configureNet()
  102. net.initialize(sampleSize, topology)
  103.  
  104. # Pass a training data set and dependent values to the algorithm
  105. net.input.setInput(training.data, trainingData)
  106. net.input.setInput(training.groundTruth, trainingGroundTruth)
  107.  
  108. # Run the neural network training and retrieve training model
  109. trainingModel = net.compute().get(training.model)
  110. # return prediction model
  111. return trainingModel.getPredictionModel_Float32()
  112.  
  113. def testModel(predictionModel):
  114. # Read testing data set from a .csv file and create a tensor to store input data
  115. predictionData = readTensorFromCSV(testDatasetFile)
  116.  
  117. # Create an algorithm to compute the neural network predictions
  118. net = prediction.Batch()
  119.  
  120. net.parameter.batchSize = predictionData.getDimensionSize(0)
  121.  
  122. # Set input objects for the prediction neural network
  123. net.input.setModelInput(prediction.model, predictionModel)
  124. net.input.setTensorInput(prediction.data, predictionData)
  125.  
  126. # Run the neural network prediction
  127. # and return results of the neural network prediction
  128. return net.compute()
  129.  
  130. def printResults(predictionResult):
  131. # Read testing ground truth from a .csv file and create a tensor to store the data
  132. predictionGroundTruth = readTensorFromCSV(testGroundTruthFile)
  133.  
  134. printTensors(predictionGroundTruth, predictionResult.getResult(prediction.prediction),
  135. "Ground truth", "Neural network predictions: each class probability",
  136. "Neural network classification results (first 20 observations):", 20)
  137.  
  138. topology = ""
  139. if __name__ == "__main__":
  140.  
  141. predictionModel = trainModel()
  142.  
  143. predictionResult = testModel(predictionModel)
  144.  
  145. printResults(predictionResult)

目前支持的Layers

  • Common Parameters
  • Fully Connected Forward Layer
  • Fully Connected Backward Layer
  • Absolute Value Forward Layer
  • Absolute Value Backward Layer
  • Logistic Forward Layer
  • Logistic Backward Layer
  • pReLU Forward Layer
  • pReLU Backward Layer
  • ReLU Forward Layer
  • ReLU Backward Layer
  • SmoothReLU Forward Layer
  • SmoothReLU Backward Layer
  • Hyperbolic Tangent Forward Layer
  • Hyperbolic Tangent Backward Layer
  • Batch Normalization Forward Layer
  • Batch Normalization Backward Layer
  • Local-Response Normalization Forward Layer
  • Local-Response Normalization Backward Layer
  • Local-Contrast Normalization Forward Layer
  • Local-Contrast Normalization Backward Layer
  • Dropout Forward Layer
  • Dropout Backward Layer
  • 1D Max Pooling Forward Layer
  • 1D Max Pooling Backward Layer
  • 2D Max Pooling Forward Layer
  • 2D Max Pooling Backward Layer
  • 3D Max Pooling Forward Layer
  • 3D Max Pooling Backward Layer
  • 1D Average Pooling Forward Layer
  • 1D Average Pooling Backward Layer
  • 2D Average Pooling Forward Layer
  • 2D Average Pooling Backward Layer
  • 3D Average Pooling Forward Layer
  • 3D Average Pooling Backward Layer
  • 2D Stochastic Pooling Forward Layer
  • 2D Stochastic Pooling Backward Layer
  • 2D Spatial Pyramid Pooling Forward Layer
  • 2D Spatial Pyramid Pooling Backward Layer
  • 2D Convolution Forward Layer
  • 2D Convolution Backward Layer
  • 2D Transposed Convolution Forward Layer
  • 2D Transposed Convolution Backward Layer
  • 2D Locally-connected Forward Layer
  • 2D Locally-connected Backward Layer
  • Reshape Forward Layer
  • Reshape Backward Layer
  • Concat Forward Layer
  • Concat Backward Layer
  • Split Forward Layer
  • Split Backward Layer
  • Softmax Forward Layer
  • Softmax Backward Layer
  • Loss Forward Layer
  • Loss Backward Layer
  • Loss Softmax Cross-entropy Forward Layer
  • Loss Softmax Cross-entropy Backward Layer
  • Loss Logistic Cross-entropy Forward Layer
  • Loss Logistic Cross-entropy Backward Layer
  • Exponential Linear Unit Forward Layer
  • Exponential Linear Unit Backward Layer

Intel DAAL AI加速——神经网络的更多相关文章

  1. Intel DAAL AI加速——支持从数据预处理到模型预测,数据源必须使用DAAL的底层封装库

    数据源加速见官方文档(必须使用DAAL自己的库): Data Management Numeric Tables Tensors Data Sources Data Dictionaries Data ...

  2. Intel DAAL AI加速 ——传统决策树和随机森林

    # file: dt_cls_dense_batch.py #===================================================================== ...

  3. 英特尔® 至强® 平台集成 AI 加速构建数据中心智慧网络

    英特尔 至强 平台集成 AI 加速构建数据中心智慧网络 SNA 通过 AI 方法来实时感知网络状态,基于网络数据分析来实现自动化部署和风险预测,从而让企业网络能更智能.更高效地为最终用户业务提供支撑. ...

  4. 释放至强平台 AI 加速潜能 汇医慧影打造全周期 AI 医学影像解决方案

    基于英特尔架构实现软硬协同加速,显著提升新冠肺炎.乳腺癌等疾病的检测和筛查效率,并帮助医疗科研平台预防"维度灾难"问题 <PAGE 1 LEFT COLUMN: CUSTOM ...

  5. tensorflow - create neural network+结果可视化+加速神经网络训练+Optimizer+TensorFlow

    以下仅为了自己方便查看,绝大部分参考来源:莫烦Python,建议去看原博客 一.添加层 def add_layer() 定义 add_layer()函数 在 Tensorflow 里定义一个添加层的函 ...

  6. Intel daal数据预处理

    https://software.intel.com/en-us/daal-programming-guide-datasource-featureextraction-py # file: data ...

  7. TensorFlow实战第三课(可视化、加速神经网络训练)

    matplotlib可视化 构件图形 用散点图描述真实数据之间的关系(plt.ion()用于连续显示) # plot the real data fig = plt.figure() ax = fig ...

  8. deeplearning.ai 卷积神经网络 Week 3 目标检测 听课笔记

    本周的主题是对象检测(object detection):不但需要检测出物体(image classification),还要能定位出在图片的具体位置(classification with loca ...

  9. 吴恩达deepLearning.ai循环神经网络RNN学习笔记_看图就懂了!!!(理论篇)

    前言 目录: RNN提出的背景 - 一个问题 - 为什么不用标准神经网络 - RNN模型怎么解决这个问题 - RNN模型适用的数据特征 - RNN几种类型 RNN模型结构 - RNN block - ...

随机推荐

  1. bzoj1660 / P2866 [USACO06NOV]糟糕的一天Bad Hair Day

    P2866 [USACO06NOV]糟糕的一天Bad Hair Day 奶牛题里好多单调栈..... 维护一个单调递减栈,存每只牛的高度和位置,顺便统计一下答案. #include<iostre ...

  2. c++中的字符集与中文

    就非西欧字符而言,比如中国以及港澳台,在任何编程语言的开发中都不得不考虑字符集及其表示.在c++中,对于超过1个字节的字符,有两种方式可以表示: 1.多字节表示法:通常用于存储(空间效率考虑). 2. ...

  3. Java filter中的chain.doFilter详解

    转载: 一.chain.doFilter作用 1.一般filter都是一个链,web.xml 里面配置了几个就有几个.一个一个的连在一起 request -> filter1 -> fil ...

  4. HDU 5887 Herbs Gathering(搜索求01背包)

    http://acm.hdu.edu.cn/showproblem.php?pid=5887 题意: 容量很大的01背包. 思路: 因为这道题目背包容量比较大,所以用dp是行不通的.所以得用搜索来做, ...

  5. UVa 10118 免费糖果(记忆化搜索+哈希)

    https://vjudge.net/problem/UVA-10118 题意: 桌上有4堆糖果,每堆有N颗.佳佳有一个最多可以装5颗糖的小篮子.他每次选择一堆糖果,把最顶上的一颗拿到篮子里.如果篮子 ...

  6. SVN基本命令总结

    1.svn add [path] 预定添加文件或者目录到版本库,这些add的文件会在下一次提交文件时同步到版本服务器. 2.svn commit [path] 提交文件到版本服务器. 3.svn co ...

  7. React Native API之 ActionSheetIOS

    ok!先来演示是下效果: 官网教程:http://reactnative.cn/docs/0.31/actionsheetios.html#content 上代码:引入API组件: import Re ...

  8. AtCoder Grand Contest 013 C :Ants on a Circle

    本文版权归ljh2000和博客园共有,欢迎转载,但须保留此声明,并给出原文链接,谢谢合作. 本文作者:ljh2000 作者博客:http://www.cnblogs.com/ljh2000-jump/ ...

  9. OpenVPN Windows 平台安装部署教程

    一.环境准备: 操作系统Windows 服务器IP:192.168.88.123  VPN:192.168.89.1 客户端IP:192.168.78.3 客户端服务端单网卡,路由器做好端口映射 安装 ...

  10. [ios]sqlite轻量级数据库学习连接

    SQLLite (一)基本介绍 http://blog.csdn.net/lyrebing/article/details/8224431 SQLLite (二) :sqlite3_open, sql ...