#-----------------------------------------------------------------------------#
# R in Action (2nd ed): Chapter 17 #
# Classification #
# requires packages rpart, rpart.plot, party, randomForest, e1071, rattle #
# install.packages(c("rpart", "party", "randomForest", "e1071", "rpart.plot")) #
# install.packages("rattle", dependencies = c("Depends", "Suggests")) #
#-----------------------------------------------------------------------------#

par(ask=TRUE)

# Listing 17.1 - Prepare the breast cancer data
loc <- "http://archive.ics.uci.edu/ml/machine-learning-databases/"
ds <- "breast-cancer-wisconsin/breast-cancer-wisconsin.data"
url <- paste(loc, ds, sep="")
breast <- read.table(url, sep=",", header=FALSE, na.strings="?")
names(breast) <- c("ID", "clumpThickness", "sizeUniformity",
                   "shapeUniformity", "marginalAdhesion",
                   "singleEpithelialCellSize", "bareNuclei",
                   "blandChromatin", "normalNucleoli", "mitosis", "class")
df <- breast[-1]
df$class <- factor(df$class, levels=c(2,4),
                   labels=c("benign", "malignant"))

set.seed(1234)
train <- sample(nrow(df), 0.7*nrow(df))
df.train <- df[train,]
df.validate <- df[-train,]
table(df.train$class)
table(df.validate$class)
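
# Optional check (not in the book's listing): class proportions in each split,
# to confirm that benign/malignant cases are similarly balanced after sampling.
prop.table(table(df.train$class))
prop.table(table(df.validate$class))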

# Listing 17.2 - Logistic regression with glm()
fit.logit <- glm(class~., data=df.train, family=binomial())
summary(fit.logit)
prob <- predict(fit.logit, df.validate, type="response")
logit.pred <- factor(prob > .5, levels=c(FALSE, TRUE),
                     labels=c("benign", "malignant"))
logit.perf <- table(df.validate$class, logit.pred,
                    dnn=c("Actual", "Predicted"))
logit.perf
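
# Optional sketch (not part of Listing 17.2): stepwise selection with step()
# can drop predictors that add little to the full logistic model; the reduced
# model is then re-checked on the validation sample. The object names below
# (logit.fit.reduced, prob.reduced, logit.reduced.pred) are illustrative.
logit.fit.reduced <- step(fit.logit)
prob.reduced <- predict(logit.fit.reduced, df.validate, type="response")
logit.reduced.pred <- factor(prob.reduced > .5, levels=c(FALSE, TRUE),
                             labels=c("benign", "malignant"))
table(df.validate$class, logit.reduced.pred, dnn=c("Actual", "Predicted"))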

# Listing 17.3 - Creating a classical decision tree with rpart()
library(rpart)
set.seed(1234)
dtree <- rpart(class ~ ., data=df.train, method="class",
               parms=list(split="information"))
dtree$cptable
plotcp(dtree)
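
# Optional sketch (not in the book's listing): rather than reading the value
# off the plotcp() chart, the complexity parameter can be chosen from the
# cptable with the usual one-standard-error rule; cpt, min.x and cp.auto are
# illustrative names, and the result could be passed to prune() below in
# place of cp=.0125.
cpt <- dtree$cptable
min.x <- which.min(cpt[, "xerror"])
cp.auto <- cpt[cpt[, "xerror"] <= cpt[min.x, "xerror"] + cpt[min.x, "xstd"], "CP"][1]
cp.auto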

dtree.pruned <- prune(dtree, cp=.0125)

library(rpart.plot)
prp(dtree.pruned, type = 2, extra = 104,
    fallen.leaves = TRUE, main="Decision Tree")

dtree.pred <- predict(dtree.pruned, df.validate, type="class")
dtree.perf <- table(df.validate$class, dtree.pred,
                    dnn=c("Actual", "Predicted"))
dtree.perf

# Listing 17.4 - Creating a conditional inference tree with ctree()
library(party)
fit.ctree <- ctree(class~., data=df.train)
plot(fit.ctree, main="Conditional Inference Tree")

ctree.pred <- predict(fit.ctree, df.validate, type="response")
ctree.perf <- table(df.validate$class, ctree.pred,
                    dnn=c("Actual", "Predicted"))
ctree.perf
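
# Optional sketch (not in Listing 17.4): class probabilities rather than hard
# labels; with party's predict() this should return a list of probability
# vectors, one per validation case. ctree.prob is an illustrative name.
ctree.prob <- predict(fit.ctree, df.validate, type="prob")
ctree.prob[1:3]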

# Listing 17.5 - Random forest
library(randomForest)
set.seed(1234)
fit.forest <- randomForest(class~., data=df.train,
                           na.action=na.roughfix,
                           importance=TRUE)
fit.forest
importance(fit.forest, type=2)
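
# Optional sketch (not in the book's listing): the same importance scores can
# be shown graphically with randomForest's varImpPlot().
varImpPlot(fit.forest, main="Variable Importance")
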
forest.pred <- predict(fit.forest, df.validate)
forest.perf <- table(df.validate$class, forest.pred,
                     dnn=c("Actual", "Predicted"))
forest.perf

# Listing 17.6 - A support vector machine
library(e1071)
set.seed(1234)
fit.svm <- svm(class~., data=df.train)
fit.svm
svm.pred <- predict(fit.svm, na.omit(df.validate))
svm.perf <- table(na.omit(df.validate)$class,
                  svm.pred, dnn=c("Actual", "Predicted"))
svm.perf

# Listing 17.7 - Tuning an RBF support vector machine (this can take a while)
set.seed(1234)
tuned <- tune.svm(class~., data=df.train,
                  gamma=10^(-6:1),
                  cost=10^(-10:10))
tuned
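
# Optional sketch (not in the book's listing): the winning gamma/cost pair can
# also be read off the tune object programmatically (tuned$best.parameters is
# a one-row data frame), and tuned$best.model holds a model already refit with
# those values; fit.svm.best below is an illustrative name.
tuned$best.parameters
fit.svm.best <- tuned$best.model
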
fit.svm <- svm(class~., data=df.train, gamma=.01, cost=1)
svm.pred <- predict(fit.svm, na.omit(df.validate))
svm.perf <- table(na.omit(df.validate)$class,
                  svm.pred, dnn=c("Actual", "Predicted"))
svm.perf

# Listing 17.8 - Function for assessing binary classification accuracy
performance <- function(table, n=2){
  if(!all(dim(table) == c(2,2)))
    stop("Must be a 2 x 2 table")
  tn = table[1,1]                        # true negatives
  fp = table[1,2]                        # false positives
  fn = table[2,1]                        # false negatives
  tp = table[2,2]                        # true positives
  sensitivity = tp/(tp+fn)
  specificity = tn/(tn+fp)
  ppp = tp/(tp+fp)                       # positive predictive value
  npp = tn/(tn+fn)                       # negative predictive value
  hitrate = (tp+tn)/(tp+tn+fp+fn)        # overall accuracy
  result <- paste("Sensitivity = ", round(sensitivity, n),
                  "\nSpecificity = ", round(specificity, n),
                  "\nPositive Predictive Value = ", round(ppp, n),
                  "\nNegative Predictive Value = ", round(npp, n),
                  "\nAccuracy = ", round(hitrate, n), "\n", sep="")
  cat(result)
}

# Listing 17.9 - Performance of breast cancer data classifiers
performance(logit.perf)
performance(dtree.perf)
performance(ctree.perf)
performance(forest.perf)
performance(svm.perf)
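
# Optional sketch (not in the book's listing): overall accuracy for each
# classifier collected in one data frame for a quick side-by-side comparison;
# accuracy() and the model labels below are illustrative.
accuracy <- function(tab) sum(diag(tab)) / sum(tab)
data.frame(model = c("logit", "rpart", "ctree", "randomForest", "svm"),
           accuracy = sapply(list(logit.perf, dtree.perf, ctree.perf,
                                  forest.perf, svm.perf), accuracy))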

# Using the rattle package for data mining
loc <- "http://archive.ics.uci.edu/ml/machine-learning-databases/"
ds <- "pima-indians-diabetes/pima-indians-diabetes.data"
url <- paste(loc, ds, sep="")
diabetes <- read.table(url, sep=",", header=FALSE)
names(diabetes) <- c("npregnant", "plasma", "bp", "triceps",
                     "insulin", "bmi", "pedigree", "age", "class")
diabetes$class <- factor(diabetes$class, levels=c(0,1),
                         labels=c("normal", "diabetic"))
library(rattle)
rattle()
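
# Optional non-GUI sketch (not in the book's code): the same diabetes data can
# be run through the rpart workflow from Listing 17.3 directly; the object
# names below (diabetes.train, diabetes.validate, fit.diabetes, pred.diabetes)
# are illustrative.
set.seed(1234)
train <- sample(nrow(diabetes), 0.7*nrow(diabetes))
diabetes.train <- diabetes[train,]
diabetes.validate <- diabetes[-train,]
fit.diabetes <- rpart(class ~ ., data=diabetes.train, method="class")
pred.diabetes <- predict(fit.diabetes, diabetes.validate, type="class")
performance(table(diabetes.validate$class, pred.diabetes,
                  dnn=c("Actual", "Predicted")))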
