22.01.Cluster
1. Clustering the iris dataset and checking the result
from sklearn import cluster
from sklearn import datasets

iris = datasets.load_iris()
data = iris['data']
model = cluster.KMeans(n_clusters=3)
model.fit(data)
print(model.labels_)
[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
2 2 2 0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 0 2 0 0 0 0 2 0 0 0 0
0 0 2 2 0 0 0 0 2 0 2 0 2 0 0 2 2 0 0 0 0 0 2 0 0 0 0 2 0 0 0 2 0 0 0 2 0
0 2]
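The cluster numbers printed above are arbitrary: which species ends up as cluster 0, 1, or 2 can change from run to run. A quick, optional way to check how well the clusters line up with the real species is to count labels per species. This is a minimal sketch, assuming the iris and model objects from the code above:

import numpy as np

# Cross-tabulate the cluster assignments against the true species
# (only the grouping matters, not the cluster index itself).
for species_idx, species_name in enumerate(iris["target_names"]):
    counts = np.bincount(model.labels_[iris["target"] == species_idx], minlength=3)
    print(species_name, counts)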
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets

# Load the iris data
iris = datasets.load_iris()
data = iris["data"]

# Fit the model → create the clusters
model = cluster.KMeans(n_clusters=3)
model.fit(data)
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
n_clusters=3, n_init=10, n_jobs=None, precompute_distances='auto',
random_state=None, tol=0.0001, verbose=0)
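The lines above are just the estimator's repr echoing its parameters (from an older scikit-learn release; some of these arguments have since been removed). After fitting, the learned state can be inspected directly, for example:

print(model.cluster_centers_)   # one 4-dimensional center per cluster
print(model.inertia_)           # sum of squared distances of samples to their closest center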
# Get the labels from the fitted model
labels = model.labels_

### Draw the graph
x_index = 2
y_index = 3
data_x = data[:, x_index]
data_y = data[:, y_index]

x_max = 7.5
x_min = 0
y_max = 3
y_min = 0
x_label = iris["feature_names"][x_index]
y_label = iris["feature_names"][y_index]

plt.scatter(data_x[labels == 0], data_y[labels == 0], c='black', alpha=0.3, s=100, marker="o", label="cluster 0")
plt.scatter(data_x[labels == 1], data_y[labels == 1], c='black', alpha=0.3, s=100, marker="o", label="cluster 1")
plt.scatter(data_x[labels == 2], data_y[labels == 2], c='black', alpha=0.3, s=100, marker="o", label="cluster 2")

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xlabel(x_label, fontsize='large')
plt.ylabel(y_label, fontsize='large')
plt.show()
" alt="" />
2. Using the k-means model
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets

# Load the iris data
iris = datasets.load_iris()
data = iris["data"]

# Define the initial cluster centers
init_centers = np.array([
    [4, 2.5, 3, 0],
    [5, 3,   3, 1],
    [6, 4,   3, 2]])

# Pick the features to plot
x_index = 1
y_index = 2
data_x = data[:, x_index]
data_y = data[:, y_index]

# Define the plot scale and axis labels
x_max = 4.5
x_min = 2
y_max = 7
y_min = 1
x_label = iris["feature_names"][x_index]
y_label = iris["feature_names"][y_index]

def show_result(cluster_centers, labels):
    # Draw cluster 0 and its center
    plt.scatter(data_x[labels == 0], data_y[labels == 0], c='black', alpha=0.3, s=100, marker="o", label="cluster 0")
    plt.scatter(cluster_centers[0][x_index], cluster_centers[0][y_index], facecolors='white', edgecolors='black', s=300, marker="o")
    # Draw cluster 1 and its center
    plt.scatter(data_x[labels == 1], data_y[labels == 1], c='black', alpha=0.3, s=100, marker="^", label="cluster 1")
    plt.scatter(cluster_centers[1][x_index], cluster_centers[1][y_index], facecolors='white', edgecolors='black', s=300, marker="^")
    # Draw cluster 2 and its center
    plt.scatter(data_x[labels == 2], data_y[labels == 2], c='black', alpha=0.3, s=100, marker="*", label="cluster 2")
    plt.scatter(cluster_centers[2][x_index], cluster_centers[2][y_index], facecolors='white', edgecolors='black', s=500, marker="*")
    # Set the plot scale and axis labels
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xlabel(x_label, fontsize='large')
    plt.ylabel(y_label, fontsize='large')
    plt.show()

# Show the initial state
labels = np.zeros(len(data), dtype=int)
show_result(init_centers, labels)

# Run k-means one iteration at a time and show the result after each step
for i in range(5):
    model = cluster.KMeans(n_clusters=3, max_iter=1, init=init_centers).fit(data)
    labels = model.labels_
    init_centers = model.cluster_centers_
    show_result(init_centers, labels)
" alt="" />
C:\ProgramData\Anaconda3\envs\machine\lib\site-packages\sklearn\cluster\k_means_.py:969: RuntimeWarning: Explicit initial center position passed: performing only one init in k-means instead of n_init=10
return_n_iter=True)
" alt="" />
C:\ProgramData\Anaconda3\envs\machine\lib\site-packages\sklearn\cluster\k_means_.py:969: RuntimeWarning: Explicit initial center position passed: performing only one init in k-means instead of n_init=10
return_n_iter=True)
" alt="" />
C:\ProgramData\Anaconda3\envs\machine\lib\site-packages\sklearn\cluster\k_means_.py:969: RuntimeWarning: Explicit initial center position passed: performing only one init in k-means instead of n_init=10
return_n_iter=True)
" alt="" />
C:\ProgramData\Anaconda3\envs\machine\lib\site-packages\sklearn\cluster\k_means_.py:969: RuntimeWarning: Explicit initial center position passed: performing only one init in k-means instead of n_init=10
return_n_iter=True)
" alt="" />
C:\ProgramData\Anaconda3\envs\machine\lib\site-packages\sklearn\cluster\k_means_.py:969: RuntimeWarning: Explicit initial center position passed: performing only one init in k-means instead of n_init=10
return_n_iter=True)
" alt="" />
2. Using the k-means model (2)
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets

# Load the iris data
iris = datasets.load_iris()
data = iris['data']

# Fit the model → create the clusters
model = cluster.KMeans(n_clusters=3)
model.fit(data)
KMeans(algorithm='auto', copy_x=True, init='k-means++', max_iter=300,
n_clusters=3, n_init=10, n_jobs=None, precompute_distances='auto',
random_state=None, tol=0.0001, verbose=0)
# Get the labels from the fitted model
labels = model.labels_

# Draw the graph
ldata = data[labels == 0]
plt.scatter(ldata[:, 2], ldata[:, 3],
            c='black', alpha=0.3, s=100, marker="o")

ldata = data[labels == 1]
plt.scatter(ldata[:, 2], ldata[:, 3],
            c='black', alpha=0.3, s=100, marker="^")

ldata = data[labels == 2]
plt.scatter(ldata[:, 2], ldata[:, 3],
            c='black', alpha=0.3, s=100, marker="*")

# Set the axis labels
plt.xlabel(iris["feature_names"][2], fontsize='large')
plt.ylabel(iris["feature_names"][3], fontsize='large')
plt.show()
" alt="" />
3. Plotting the four features in pairwise combinations
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn import datasets

# Load the iris data
iris = datasets.load_iris()
data = iris['data']

# Fit the model → create the clusters
model = cluster.KMeans(n_clusters=3)
model.fit(data)

# Get the labels from the fitted model
labels = model.labels_

### Draw the graphs
MARKERS = ["o", "^", "*", "v", "+", "x", "d", "p", "s", "1", "2"]

# Draw a scatter plot of the two features at the given indices
def scatter_by_features(feat_idx1, feat_idx2):
    for lbl in range(labels.max() + 1):
        clustered = data[labels == lbl]
        plt.scatter(clustered[:, feat_idx1], clustered[:, feat_idx2],
                    c='black', alpha=0.3, s=100,
                    marker=MARKERS[lbl], label='label {}'.format(lbl))
    plt.xlabel(iris["feature_names"][feat_idx1], fontsize='xx-large')
    plt.ylabel(iris["feature_names"][feat_idx2], fontsize='xx-large')

plt.figure(figsize=(16, 16))

# features "sepal length" and "sepal width"
plt.subplot(3, 2, 1)
scatter_by_features(0, 1)
# features "sepal length" and "petal length"
plt.subplot(3, 2, 2)
scatter_by_features(0, 2)
# features "sepal length" and "petal width"
plt.subplot(3, 2, 3)
scatter_by_features(0, 3)
# features "sepal width" and "petal length"
plt.subplot(3, 2, 4)
scatter_by_features(1, 2)
# features "sepal width" and "petal width"
plt.subplot(3, 2, 5)
scatter_by_features(1, 3)
# features "petal length" and "petal width"
plt.subplot(3, 2, 6)
scatter_by_features(2, 3)

plt.tight_layout()
plt.show()
" alt="" />