/**
* Created by lkl on 2018/1/16.
*/
import org.apache.spark.mllib.evaluation.BinaryClassificationMetrics
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.GradientBoostedTrees
import org.apache.spark.mllib.tree.configuration.BoostingStrategy
import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel
import org.apache.spark.sql.{Row, SaveMode}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.mutable.ArrayBuffer
object abregression3Model20180116 {
def main(args: Array[String]): Unit = {
val sparkConf = new SparkConf().setAppName("abregression3Model20180116")
val sc = new SparkContext(sparkConf)
val hc = new HiveContext(sc)
val data = hc.sql(s"select * from lkl_card_score.fqz_score_dataset_03train").map {
row =>
val arr = new ArrayBuffer[Double]()
// skip the label and phone columns; nulls and non-numeric values become 0.0
for (i <- 3 until row.size) {
if (row.isNullAt(i)) {
arr += 0.0
}
else if (row.get(i).isInstanceOf[Int])
arr += row.getInt(i).toDouble
else if (row.get(i).isInstanceOf[Double])
arr += row.getDouble(i)
else if (row.get(i).isInstanceOf[Long])
arr += row.getLong(i).toDouble
else if (row.get(i).isInstanceOf[String])
arr += 0.0
}
LabeledPoint(row.getInt(2).toDouble, Vectors.dense(arr.toArray))
}
// Split data into training (70%) and test (30%)
val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3), seed = 11L)
// Gradient boosting is an iterative algorithm, so cache the training RDD
trainingData.cache()
// Configure and train the gradient-boosted trees model
val boostingStrategy = BoostingStrategy.defaultParams("Regression")
boostingStrategy.setNumIterations(40) // Note: Use more iterations in practice.
boostingStrategy.treeStrategy.setMaxDepth(6)
boostingStrategy.treeStrategy.setMinInstancesPerNode(50)
val model = GradientBoostedTrees.train(trainingData, boostingStrategy)
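// Alternative sketch (assumes Spark >= 1.4's MLlib API): instead of a fixed
// iteration count, let the held-out split stop boosting once validation error
// stops improving.
// val model = new GradientBoostedTrees(boostingStrategy)
//   .runWithValidation(trainingData, testData)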
model.save(sc, s"hdfs://ns1/user/songchunlin/model/abregression3Model20180116")
sc.makeRDD(Seq(model.toDebugString)).repartition(1).saveAsTextFile(s"hdfs://ns1/user/songchunlin/model/toDebugString/abregression3Model20180116")
// Score the full dataset (originally only testData was scored)
val omodel = GradientBoostedTreesModel.load(sc, s"hdfs://ns1/user/songchunlin/model/abregression3Model20180116")
val predictionAndLabels = data.map { case LabeledPoint(label, features) =>
val prediction = omodel.predict(features)
(prediction, label)
}
println("testData count = " + testData.count())
println("predictionAndLabels count = " + predictionAndLabels.count())
predictionAndLabels.map(x => "predicts: " + x._1 + " --> labels: " + x._2).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/predictionAndLabels")
val metrics = new BinaryClassificationMetrics(predictionAndLabels)
val precision = metrics.precisionByThreshold
precision.map({ case (t, p) =>
  "Threshold: " + t + " Precision: " + p
}).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/precision")
val recall = metrics.recallByThreshold
recall.map({ case (t, r) =>
  "Threshold: " + t + " Recall: " + r
}).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/recall")
val beta = 2
val f2Score = metrics.fMeasureByThreshold(beta)
f2Score.map(x => "Threshold: " + x._1 + " --> F-score: " + x._2 + " --> Beta = 2")
  .saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/f1Score")
val prc = metrics.pr
prc.map(x => "Recall: " + x._1 + " --> Precision: " + x._2).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/prc")
// AUPRC: area under the precision-recall curve
val auPRC = metrics.areaUnderPR
println("Area under precision-recall curve = " + auPRC)
sc.makeRDD(Seq("Area under precision-recall curve = " + auPRC)).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/auPRC")
// ROC curve
val roc = metrics.roc
roc.map(x => "FalsePositiveRate: " + x._1 + " --> Recall: " + x._2).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/roc")
// AUC
val auROC = metrics.areaUnderROC
sc.makeRDD(Seq("Area under ROC = " + auROC)).saveAsTextFile(s"hdfs://ns1/user/szdsjkf/model_training/jrsc/20171218/auROC")
println("Area under ROC = " + auROC)
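// The commented-out accuracy line below compares continuous GBDT scores to the
// 0/1 labels with ==, which would almost never match. A minimal sketch instead
// binarizes the score first (the 0.5 cut-off is an assumption, not tuned):
val cutoff = 0.5
val accuracy = predictionAndLabels.map { case (score, label) =>
  if ((if (score > cutoff) 1.0 else 0.0) == label) 1.0 else 0.0
}.mean()
println("Accuracy at cutoff " + cutoff + " = " + accuracy)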
//val accuracy = 1.0 * predictionAndLabels.filter(x => x._1 == x._2).count() / testData.count()
val dataInstance = hc.sql(s"select * from lkl_card_score.fqz_score_dataset_03train").map {
row =>
val arr = new ArrayBuffer[Double]()
// skip the label and phone columns; nulls and non-numeric values become 0.0
for (i <- 3 until row.size) {
if (row.isNullAt(i)) {
arr += 0.0
}
else if (row.get(i).isInstanceOf[Int])
arr += row.getInt(i).toDouble
else if (row.get(i).isInstanceOf[Double])
arr += row.getDouble(i)
else if (row.get(i).isInstanceOf[Long])
arr += row.getLong(i).toDouble
else if (row.get(i).isInstanceOf[String])
arr += 0.0
}
(row(0), row(1), row(2), Vectors.dense(arr.toArray))
}
val preditDataGBDT = dataInstance.map { point =>
val prediction = model.predict(point._4)
// order_id, apply_time, label, score
(point._1,point._2,point._3,prediction)
}
// Convert the RDD to a DataFrame
val rowRDD = preditDataGBDT.map(row => Row(row._1.toString,row._2.toString,row._3.toString,row._4))
val schema = StructType(
List(
StructField("order_id", StringType, true),
StructField("apply_time", StringType, true),
StructField("label", StringType, true),
StructField("score", DoubleType, true)
)
)
// Apply the schema to the row RDD
val scoreDataFrame = hc.createDataFrame(rowRDD, schema)
scoreDataFrame.count() // force evaluation as a row-count sanity check
scoreDataFrame.write.mode(SaveMode.Overwrite).saveAsTable("lkl_card_score.fqz_score_dataset_03train_predict")
// Experiment: train a second model on a class-balanced sample
val balance = hc.sql(s"select * from lkl_card_score.overdue_result_all_new_woe_instant_v3_02train where label='1' limit 85152 union all select * from lkl_card_score.overdue_result_all_new_woe_instant_v3_02train where label='0'").map {
row =>
val arr = new ArrayBuffer[Double]()
// skip the label and phone columns; nulls and non-numeric values become 0.0
for (i <- 3 until row.size) {
if (row.isNullAt(i)) {
arr += 0.0
}
else if (row.get(i).isInstanceOf[Int])
arr += row.getInt(i).toDouble
else if (row.get(i).isInstanceOf[Double])
arr += row.getDouble(i)
else if (row.get(i).isInstanceOf[Long])
arr += row.getLong(i).toDouble
else if (row.get(i).isInstanceOf[String])
arr += 0.0
}
LabeledPoint(row.getInt(2).toDouble, Vectors.dense(arr.toArray))
}
// Gradient boosting is iterative, so cache the balanced training RDD
balance.cache()
val boostingStrategy1 = BoostingStrategy.defaultParams("Regression")
boostingStrategy1.setNumIterations(40) // Note: Use more iterations in practice.
boostingStrategy1.treeStrategy.setMaxDepth(6)
boostingStrategy1.treeStrategy.setMinInstancesPerNode(50)
val model2 = GradientBoostedTrees.train(balance, boostingStrategy1)
val predictionAndLabels2 = data.map { case LabeledPoint(label, features) =>
val prediction = model2.predict(features)
(prediction, label)
}
val metrics2 = new BinaryClassificationMetrics(predictionAndLabels2)
// AUPRC: area under the precision-recall curve
val auPRC1 = metrics2.areaUnderPR
println("Area under precision-recall curve (balanced model) = " + auPRC1)
val preditDataGBDT1 = dataInstance.map { point =>
val prediction2 = model2.predict(point._4)
// order_id, apply_time, label, score
(point._1,point._2,point._3,prediction2)
}
// Convert the RDD to a DataFrame
val rowRDD2 = preditDataGBDT1.map(row => Row(row._1.toString,row._2.toString,row._3.toString,row._4))
val schema2 = StructType(
List(
StructField("order_id", StringType, true),
StructField("apply_time", StringType, true),
StructField("label", StringType, true),
StructField("score", DoubleType, true)
)
)
val scoreDataFrame2 = hc.createDataFrame(rowRDD2, schema2)
scoreDataFrame2.count() // force evaluation as a row-count sanity check
scoreDataFrame2.write.mode(SaveMode.Overwrite).saveAsTable("lkl_card_score.fqz_score_dataset_02val_170506_predict")
}
}
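
The null-safe row-to-feature conversion is written out three times above. As a minimal sketch (an illustration, assuming all three queries share the same layout, with everything after column index 2 being a feature), the loop could be factored into a helper:

import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.sql.Row

// Hypothetical helper mirroring the inline loops: columns 3..n of a Row become
// a dense feature vector, with nulls and strings mapped to 0.0.
def rowToFeatures(row: Row): Vector = {
  val values = (3 until row.size).map { i =>
    if (row.isNullAt(i)) 0.0
    else row.get(i) match {
      case v: Int    => v.toDouble
      case v: Long   => v.toDouble
      case v: Double => v
      case _         => 0.0 // strings and anything else, as in the original
    }
  }
  Vectors.dense(values.toArray)
}

Each map block then reduces to LabeledPoint(row.getInt(2).toDouble, rowToFeatures(row)) or (row(0), row(1), row(2), rowToFeatures(row)).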
