from pyspark import SparkContext, SQLContext
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# When not running in the pyspark shell (where sqlContext is predefined),
# create the contexts first:
sc = SparkContext(appName="DecisionTreeExample")
sqlContext = SQLContext(sc)

# Load the data stored in LIBSVM format as a DataFrame.
data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")

# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# We specify maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer = \
    VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)

# Split the data into training and test sets (30% held out for testing).
(trainingData, testData) = data.randomSplit([0.7, 0.3])

# Train a DecisionTree model.
dt = DecisionTreeClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")

# Chain indexers and tree in a Pipeline.
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, dt])

# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)

# Make predictions.
predictions = model.transform(testData)

# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
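The displayed predictions use the indexed labels; they can be mapped back to the original label values with IndexToString. A minimal sketch, not part of the original snippet (labelConverter and predictedLabel are illustrative names):

from pyspark.ml.feature import IndexToString

# Map indexed predictions back to the original labels (illustrative addition).
labelConverter = IndexToString(inputCol="prediction", outputCol="predictedLabel",
                               labels=labelIndexer.labels)
labelConverter.transform(predictions).select("predictedLabel", "features").show(5)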
# Select (prediction, true label) and compute test error.
evaluator = MulticlassClassificationEvaluator(
    labelCol="indexedLabel", predictionCol="prediction",
    metricName="accuracy")  # "accuracy" replaces the old "precision" metric removed in Spark 2.x
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))

treeModel = model.stages[2]
# summary only
print(treeModel)
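print(treeModel) only shows a one-line summary; in Spark 2.x the full set of learned splits can be printed as well. A minimal sketch:

# Full description of the learned tree, node by node
# (toDebugString is a property on the fitted model in Spark 2.x).
print(treeModel.toDebugString)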
#############################

from pyspark.ml.classification import LogisticRegression
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator

# The original snippet presupposes an estimator `lr` and an `evaluator`
# from an earlier logistic regression example (not shown); minimal
# definitions on the default "label"/"prediction" columns are assumed here.
lr = LogisticRegression()
evaluator = MulticlassClassificationEvaluator(metricName="accuracy")

# Create ParamGrid for cross-validation.
paramGrid = (ParamGridBuilder()
             .addGrid(lr.regParam, [0.01, 0.5, 2.0])
             .addGrid(lr.elasticNetParam, [0.0, 0.5, 1.0])
             .addGrid(lr.maxIter, [1, 5, 10])
             .build())
# Create 5-fold CrossValidator
cv = CrossValidator(estimator=lr, estimatorParamMaps=paramGrid,
                    evaluator=evaluator, numFolds=5)

# Run cross-validation.
cvModel = cv.fit(trainingData)
# This will likely take a fair amount of time because of the number of
# models we're creating and testing.

# Use the test set here so we can measure the accuracy of the model on new data.
# cvModel uses the best model found during cross-validation.
predictions = cvModel.transform(testData)

# Evaluate the best model.
evaluator.evaluate(predictions)

# We can also access the model's feature weights and intercept easily.
print('Model Intercept:', cvModel.bestModel.intercept)
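The average cross-validated metric for every grid point is also recorded on the fitted CrossValidatorModel. A minimal sketch, assuming the paramGrid and cvModel above (avgMetrics is available in recent Spark versions):

# Pair each parameter combination with its average cross-validated metric.
for params, metric in zip(paramGrid, cvModel.avgMetrics):
    print({p.name: v for p, v in params.items()}, metric)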
Spark ML provides a CrossValidator class that can be used to perform cross-validation and parameter search. Assuming your data is already preprocessed, you can add cross-validation as follows:

import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import org.apache.spark.ml.classification.RandomForestClassifier
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator

// trainingData: [label: double, features: vector]
val trainingData: org.apache.spark.sql.DataFrame = ???
val nFolds: Int = ???
val NumTrees: Int = ???
val metric: String = ???

val rf = new RandomForestClassifier()
  .setLabelCol("label")
  .setFeaturesCol("features")
  .setNumTrees(NumTrees)

val pipeline = new Pipeline().setStages(Array(rf))

val paramGrid = new ParamGridBuilder().build() // No parameter search

val evaluator = new MulticlassClassificationEvaluator()
  .setLabelCol("label")
  .setPredictionCol("prediction")
  // "f1" (default), "weightedPrecision", "weightedRecall", "accuracy"
  .setMetricName(metric)

val cv = new CrossValidator()
  // ml.Pipeline with ml.classification.RandomForestClassifier
  .setEstimator(pipeline)
  // ml.evaluation.MulticlassClassificationEvaluator
  .setEvaluator(evaluator)
  .setEstimatorParamMaps(paramGrid)
  .setNumFolds(nFolds)

val model = cv.fit(trainingData) // trainingData: DataFrame
Using PySpark:

from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

trainingData = ...  # DataFrame[label: double, features: vector]
numFolds = ...  # Int

rf = RandomForestClassifier(labelCol="label", featuresCol="features")
evaluator = MulticlassClassificationEvaluator()  # + other params as in Scala

pipeline = Pipeline(stages=[rf])
paramGrid = ParamGridBuilder().build()  # No parameter search, as in Scala

crossval = CrossValidator(
    estimator=pipeline,
    estimatorParamMaps=paramGrid,
    evaluator=evaluator,
    numFolds=numFolds)

model = crossval.fit(trainingData)
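After fitting, the best pipeline can be extracted from the resulting CrossValidatorModel. A minimal sketch, assuming the single-stage pipeline above (featureImportances is available on the fitted forest in Spark 2.x):

# The best model is a PipelineModel; its only stage here is the fitted forest.
bestPipeline = model.bestModel
rfModel = bestPipeline.stages[0]   # RandomForestClassificationModel
print(rfModel)                     # summary
print(rfModel.featureImportances)  # per-feature importance vector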
