# file: dt_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================
## <a name="DAAL-EXAMPLE-PY-DT_CLS_DENSE_BATCH"></a>
## \example dt_cls_dense_batch.py

import os
import sys

from daal.algorithms.decision_tree.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable
)

# Make the sibling 'utils' helper module importable regardless of the CWD.
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_train.csv')
pruneDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_prune.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_test.csv')

nFeatures = 5   # number of feature columns in each input .csv file
nClasses = 5    # number of distinct class labels

# State shared between the train / test / print stages of the example.
model = None
predictionResult = None
testGroundTruth = None


def trainModel():
    """Train the decision tree classification model.

    Reads the training and pruning data sets from .csv files, feeds both to
    the decision tree training algorithm, and stores the trained model in
    the module-level ``model`` variable.
    """
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input
    # data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the pruning
    # data from a .csv file
    pruneDataSource = FileDataSource(
        pruneDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for pruning data and labels
    pruneData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    pruneGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    pruneMergedData = MergedNumericTable(pruneData, pruneGroundTruth)

    # Retrieve the data from the input file
    pruneDataSource.loadDataBlock(pruneMergedData)

    # Create an algorithm object to train the decision tree classification model
    algorithm = training.Batch(nClasses)

    # Pass the training data set, labels, and pruning set to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)
    algorithm.input.setTable(training.dataForPruning, pruneData)
    algorithm.input.setTable(training.labelsForPruning, pruneGroundTruth)

    # Train the model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)


def testModel():
    """Run prediction with the trained model on the test data set.

    Stores the test labels in ``testGroundTruth`` and the algorithm output
    in ``predictionResult`` for :func:`printResults`.
    """
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test
    # data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Create an algorithm object for decision tree classification prediction
    # with the default method
    algorithm = prediction.Batch()

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve them
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    """Print the ground truth alongside the predicted class labels."""
    printNumericTables(
        testGroundTruth,
        predictionResult.get(classifier.prediction.prediction),
        "Ground truth", "Classification results",
        "Decision tree classification results (first 20 observations):",
        20, flt64=False
    )


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()

  

随机森林的:

# file: df_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================
## <a name="DAAL-EXAMPLE-PY-DF_CLS_DENSE_BATCH"></a>
## \example df_cls_dense_batch.py

import os
import sys

from daal.algorithms import decision_forest
from daal.algorithms.decision_forest.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable, features
)

# Make the sibling 'utils' helper module importable regardless of the CWD.
utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable, printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_train.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_test.csv')

nFeatures = 3   # number of feature columns in each input .csv file
nClasses = 5    # number of distinct class labels

# Decision forest parameters
nTrees = 10
minObservationsInLeafNode = 8

# State shared between the train / test / print stages of the example.
model = None
predictionResult = None
testGroundTruth = None


def _markFeatureTypes(table):
    """Update *table*'s dictionary: columns 0-1 are continuous, column 2 categorical.

    Shared by training and testing so both tables describe the data
    identically. (Local renamed from ``dict`` to avoid shadowing the builtin.)
    """
    featureDict = table.getDictionary()
    featureDict[0].featureType = features.DAAL_CONTINUOUS
    featureDict[1].featureType = features.DAAL_CONTINUOUS
    featureDict[2].featureType = features.DAAL_CATEGORICAL


def trainModel():
    """Train the decision forest classification model.

    Stores the trained model in the module-level ``model`` variable and
    prints the variable-importance table and the out-of-bag error.
    """
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input
    # data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Add feature-type information to the data dictionary
    _markFeatureTypes(trainData)

    # Create an algorithm object to train the decision forest classification model
    algorithm = training.Batch(nClasses)
    algorithm.parameter.nTrees = nTrees
    algorithm.parameter.minObservationsInLeafNode = minObservationsInLeafNode
    algorithm.parameter.featuresPerNode = nFeatures
    algorithm.parameter.varImportance = decision_forest.training.MDI
    algorithm.parameter.resultsToCompute = decision_forest.training.computeOutOfBagError

    # Pass the training data set and dependent values to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)

    # Train the model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)
    printNumericTable(trainingResult.getTable(training.variableImportance), "Variable importance results: ")
    printNumericTable(trainingResult.getTable(training.outOfBagError), "OOB error: ")


def testModel():
    """Run prediction with the trained model on the test data set.

    Stores the test labels in ``testGroundTruth`` and the algorithm output
    in ``predictionResult`` for :func:`printResults`.
    """
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test
    # data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Add feature-type information to the data dictionary
    _markFeatureTypes(testData)

    # Create an algorithm object for decision forest classification prediction
    # with the default method
    algorithm = prediction.Batch(nClasses)

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve them
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    """Print the first 10 predictions and the matching ground-truth labels."""
    printNumericTable(
        predictionResult.get(classifier.prediction.prediction),
        "Decision forest prediction results (first 10 rows):", 10
    )
    printNumericTable(testGroundTruth, "Ground truth (first 10 rows):", 10)


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()

  

Intel DAAL AI加速 ——传统决策树和随机森林的更多相关文章

  1. Intel DAAL AI加速——支持从数据预处理到模型预测,数据源必须使用DAAL的底层封装库

    数据源加速见官方文档(必须使用DAAL自己的库): Data Management Numeric Tables Tensors Data Sources Data Dictionaries Data ...

  2. Intel DAAL AI加速——神经网络

    # file: neural_net_dense_batch.py #================================================================= ...

  3. R语言︱决策树族——随机森林算法

    每每以为攀得众山小,可.每每又切实来到起点,大牛们,缓缓脚步来俺笔记葩分享一下吧,please~ --------------------------- 笔者寄语:有一篇<有监督学习选择深度学习 ...

  4. [ML学习笔记] 决策树与随机森林(Decision Tree&Random Forest)

    [ML学习笔记] 决策树与随机森林(Decision Tree&Random Forest) 决策树 决策树算法以树状结构表示数据分类的结果.每个决策点实现一个具有离散输出的测试函数,记为分支 ...

  5. web安全之机器学习入门——3.2 决策树与随机森林

    目录 简介 决策树简单用法 决策树检测P0P3爆破 决策树检测FTP爆破 随机森林检测FTP爆破 简介 决策树和随机森林算法是最常见的分类算法: 决策树,判断的逻辑很多时候和人的思维非常接近. 随机森 ...

  6. 逻辑斯蒂回归VS决策树VS随机森林

    LR 与SVM 不同 1.logistic regression适合需要得到一个分类概率的场景,SVM则没有分类概率 2.LR其实同样可以使用kernel,但是LR没有support vector在计 ...

  7. Machine Learning笔记整理 ------ (五)决策树、随机森林

    1. 决策树 一般的,一棵决策树包含一个根结点.若干内部结点和若干叶子结点,叶子节点对应决策结果,其他每个结点对应一个属性测试,每个结点包含的样本集合根据属性测试结果被划分到子结点中,而根结点包含样本 ...

  8. 美团店铺评价语言处理以及分类(tfidf,SVM,决策树,随机森林,Knn,ensemble)

    第一篇 数据清洗与分析部分 第二篇 可视化部分, 第三篇 朴素贝叶斯文本分类 支持向量机分类 支持向量机 网格搜索 临近法 决策树 随机森林 bagging方法 import pandas as pd ...

  9. chapter02 三种决策树模型:单一决策树、随机森林、GBDT(梯度提升决策树) 预测泰坦尼克号乘客生还情况

    单一标准的决策树:会根每维特征对预测结果的影响程度进行排序,进而决定不同特征从上至下构建分类节点的顺序.Random Forest Classifier:使用相同的训练样本同时搭建多个独立的分类模型, ...

随机推荐

  1. CSS style 属性

    CSS style 属性 定义和用法 必需的 type 属性规定样式表的 MIME 类型. type 属性指示 <style> 与 </style> 标签之间的内容. 值 &q ...

  2. 01: RabbitMQ

    目录: 1.1 RabbitMq与Redis队列对比 1.2 在win7 64位机上安装RabbitMQ 1.3 RabbitMQ消息分发轮询 与 持久化 1.4 RabbitMQ 设定某个队列里最大 ...

  3. insert into 和 where not exists

    https://social.msdn.microsoft.com/Forums/sqlserver/en-US/3569bd60-1299-4fe4-bfa1-d77ffa3e579f/insert ...

  4. BZOJ1407: [Noi2002]Savage exgcd

    Description Input 第1行为一个整数N(1<=N<=15),即野人的数目. 第2行到第N+1每行为三个整数Ci, Pi, Li表示每个野人所住的初始洞穴编号,每年走过的洞穴 ...

  5. [JavaScript] - 7kyu

    Johnny is a boy who likes to open and close lockers. He loves it so much that one day, when school w ...

  6. UVa 1471 防线

    https://vjudge.net/problem/UVA-1471 题意:给出一个序列,删除一个连续子序列,使得剩下的序列中有一个长度最大的连续递增子序列,输出个数. 思路:首先可以计算出以i结尾 ...

  7. mybatis 关于传long 类型问题

    @Datapublic class PrealertPackageStatusDTO { private Integer nowStatus; /** * packageStatusEnum */ p ...

  8. C++中的也能使用正则表达式----转载

    转自:https://www.cnblogs.com/ittinybird/p/4853532.html, 以作笔记. 正则表达式Regex(regular expression)是一种强大的描述字符 ...

  9. 转载:RESTful API 设计指南

    http://www.ruanyifeng.com/blog/2014/05/restful_api.html 网络应用程序,分为前端和后端两个部分.当前的发展趋势,就是前端设备层出不穷(手机.平板. ...

  10. 利用Chrome的Heap Snapshot功能分析一个时间段内的内存占用率

    在下图测试代码第13行和第16行设断点. 以调试方式运行,首先断点在第13行处触发: 打开Chrome开发者工具,点击Profiles tab, 再点击按钮"Take Snapshot&qu ...