Intel DAAL AI Acceleration: Classic Decision Trees and Random Forests
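Intel DAAL (Data Analytics Acceleration Library) ships batch-mode Python examples for the classic tree-based classifiers. The first listing is the decision tree example `dt_cls_dense_batch.py` from the PyDAAL distribution: it loads a training set and a separate pruning set from CSV files into DAAL numeric tables, trains and prunes a decision tree classifier, and then predicts labels for a held-out test set.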
```python
# file: dt_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

## <a name="DAAL-EXAMPLE-PY-DT_CLS_DENSE_BATCH"></a>
## \example dt_cls_dense_batch.py

import os
import sys

from daal.algorithms.decision_tree.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable, MergedNumericTable
)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_train.csv')
pruneDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_prune.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'decision_tree_test.csv')

nFeatures = 5
nClasses = 5

# Model object for the decision tree classification algorithm
model = None
predictionResult = None
testGroundTruth = None


def trainModel():
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the pruning data from a .csv file
    pruneDataSource = FileDataSource(
        pruneDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for pruning data and labels
    pruneData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    pruneGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    pruneMergedData = MergedNumericTable(pruneData, pruneGroundTruth)

    # Retrieve the data from the input file
    pruneDataSource.loadDataBlock(pruneMergedData)

    # Create an algorithm object to train the decision tree classification model
    algorithm = training.Batch(nClasses)

    # Pass the training data set, labels, and pruning data set to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)
    algorithm.input.setTable(training.dataForPruning, pruneData)
    algorithm.input.setTable(training.labelsForPruning, pruneGroundTruth)

    # Train the decision tree classification model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)


def testModel():
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Create an algorithm object for decision tree classification prediction with the default method
    algorithm = prediction.Batch()

    # Pass the testing data set and trained model to the algorithm
    # print("Number of columns: {}".format(testData.getNumberOfColumns()))
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve algorithm results
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    printNumericTables(
        testGroundTruth,
        predictionResult.get(classifier.prediction.prediction),
        "Ground truth", "Classification results",
        "Decision tree classification results (first 20 observations):",
        20, flt64=False
    )


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()
```
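For comparison, a minimal scikit-learn sketch of the same train/predict flow is shown below. It assumes the CSVs hold five feature columns followed by one label column (the layout is not spelled out in the example), and it substitutes cost-complexity pruning (`ccp_alpha`) for DAAL's pruning on a separate pruning set, since scikit-learn has no direct equivalent; the `ccp_alpha` value is arbitrary.

```python
import numpy as np
from sklearn.tree import DecisionTreeClassifier

# Assumed file layout mirroring the DAAL example: 5 feature columns + 1 label column.
train = np.loadtxt('../data/batch/decision_tree_train.csv', delimiter=',')
test = np.loadtxt('../data/batch/decision_tree_test.csv', delimiter=',')
X_train, y_train = train[:, :5], train[:, 5]
X_test, y_test = test[:, :5], test[:, 5]

# scikit-learn prunes via cost-complexity pruning rather than a separate pruning set;
# ccp_alpha=0.01 is an illustrative value, not taken from the DAAL example.
clf = DecisionTreeClassifier(ccp_alpha=0.01).fit(X_train, y_train)
print("Test-set accuracy:", clf.score(X_test, y_test))
```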
The random forest (decision forest) version is `df_cls_dense_batch.py`. Besides the usual data loading, it marks each feature in the data dictionary as continuous or categorical, and sets the forest parameters: number of trees, minimum observations per leaf node, features per node, MDI variable importance, and out-of-bag error computation:
```python
# file: df_cls_dense_batch.py
#===============================================================================
# Copyright 2014-2018 Intel Corporation.
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were
# provided to you (License). Unless the License provides otherwise, you may not
# use, modify, copy, publish, distribute, disclose or transmit this software or
# the related documents without Intel's prior written permission.
#
# This software and the related documents are provided as is, with no express
# or implied warranties, other than those that are expressly stated in the
# License.
#===============================================================================

## <a name="DAAL-EXAMPLE-PY-DF_CLS_DENSE_BATCH"></a>
## \example df_cls_dense_batch.py

import os
import sys

from daal.algorithms import decision_forest
from daal.algorithms.decision_forest.classification import prediction, training
from daal.algorithms import classifier
from daal.data_management import (
    FileDataSource, DataSourceIface, NumericTableIface, HomogenNumericTable,
    MergedNumericTable, features
)

utils_folder = os.path.realpath(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
if utils_folder not in sys.path:
    sys.path.insert(0, utils_folder)
from utils import printNumericTable, printNumericTables

DAAL_PREFIX = os.path.join('..', 'data')

# Input data set parameters
trainDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_train.csv')
testDatasetFileName = os.path.join(DAAL_PREFIX, 'batch', 'df_classification_test.csv')

nFeatures = 3
nClasses = 5

# Decision forest parameters
nTrees = 10
minObservationsInLeafNode = 8

# Model object for the decision forest classification algorithm
model = None
predictionResult = None
testGroundTruth = None


def trainModel():
    global model

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the input data from a .csv file
    trainDataSource = FileDataSource(
        trainDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for training data and labels
    trainData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    trainGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(trainData, trainGroundTruth)

    # Retrieve the data from the input file
    trainDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about the data
    dict = trainData.getDictionary()

    # Add a feature type to the dictionary
    dict[0].featureType = features.DAAL_CONTINUOUS
    dict[1].featureType = features.DAAL_CONTINUOUS
    dict[2].featureType = features.DAAL_CATEGORICAL

    # Create an algorithm object to train the decision forest classification model
    algorithm = training.Batch(nClasses)
    algorithm.parameter.nTrees = nTrees
    algorithm.parameter.minObservationsInLeafNode = minObservationsInLeafNode
    algorithm.parameter.featuresPerNode = nFeatures
    algorithm.parameter.varImportance = decision_forest.training.MDI
    algorithm.parameter.resultsToCompute = decision_forest.training.computeOutOfBagError

    # Pass the training data set and dependent values to the algorithm
    algorithm.input.set(classifier.training.data, trainData)
    algorithm.input.set(classifier.training.labels, trainGroundTruth)

    # Train the decision forest classification model and retrieve the results of the training algorithm
    trainingResult = algorithm.compute()
    model = trainingResult.get(classifier.training.model)
    printNumericTable(trainingResult.getTable(training.variableImportance), "Variable importance results: ")
    printNumericTable(trainingResult.getTable(training.outOfBagError), "OOB error: ")


def testModel():
    global testGroundTruth, predictionResult

    # Initialize FileDataSource<CSVFeatureManager> to retrieve the test data from a .csv file
    testDataSource = FileDataSource(
        testDatasetFileName,
        DataSourceIface.notAllocateNumericTable,
        DataSourceIface.doDictionaryFromContext
    )

    # Create Numeric Tables for testing data and labels
    testData = HomogenNumericTable(nFeatures, 0, NumericTableIface.notAllocate)
    testGroundTruth = HomogenNumericTable(1, 0, NumericTableIface.notAllocate)
    mergedData = MergedNumericTable(testData, testGroundTruth)

    # Retrieve the data from the input file
    testDataSource.loadDataBlock(mergedData)

    # Get the dictionary and update it with additional information about the data
    dict = testData.getDictionary()

    # Add a feature type to the dictionary
    dict[0].featureType = features.DAAL_CONTINUOUS
    dict[1].featureType = features.DAAL_CONTINUOUS
    dict[2].featureType = features.DAAL_CATEGORICAL

    # Create an algorithm object for decision forest classification prediction with the default method
    algorithm = prediction.Batch(nClasses)

    # Pass the testing data set and trained model to the algorithm
    algorithm.input.setTable(classifier.prediction.data, testData)
    algorithm.input.setModel(classifier.prediction.model, model)

    # Compute prediction results and retrieve algorithm results
    # (Result class from classifier.prediction)
    predictionResult = algorithm.compute()


def printResults():
    printNumericTable(
        predictionResult.get(classifier.prediction.prediction),
        "Decision forest prediction results (first 10 rows):", 10
    )
    printNumericTable(testGroundTruth, "Ground truth (first 10 rows):", 10)


if __name__ == "__main__":
    trainModel()
    testModel()
    printResults()
```
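A rough scikit-learn counterpart of the forest example is sketched below. It assumes the same three-features-plus-label CSV layout, treats the categorical third feature as numeric, and maps nTrees to `n_estimators`, minObservationsInLeafNode to `min_samples_leaf`, featuresPerNode to `max_features`, MDI variable importance to `feature_importances_`, and the out-of-bag error to `oob_score_` (which reports accuracy, i.e. 1 minus the OOB error). The file paths and column layout are assumptions, not part of the DAAL example.

```python
import numpy as np
from sklearn.ensemble import RandomForestClassifier

# Assumed file layout mirroring the DAAL example: 3 feature columns + 1 label column.
train = np.loadtxt('../data/batch/df_classification_train.csv', delimiter=',')
test = np.loadtxt('../data/batch/df_classification_test.csv', delimiter=',')
X_train, y_train = train[:, :3], train[:, 3]
X_test, y_test = test[:, :3], test[:, 3]

forest = RandomForestClassifier(
    n_estimators=10,        # nTrees in the DAAL example
    min_samples_leaf=8,     # minObservationsInLeafNode
    max_features=None,      # featuresPerNode = nFeatures (consider all features at each split)
    oob_score=True,         # analogous to computeOutOfBagError; with only 10 trees
                            # some samples may never be out-of-bag and a warning may appear
).fit(X_train, y_train)

print("MDI variable importance:", forest.feature_importances_)
print("OOB accuracy (1 - OOB error):", forest.oob_score_)
print("Test accuracy:", forest.score(X_test, y_test))
```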